2024-12-01 18:52:53,269 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-01 18:52:53,281 main DEBUG Took 0.009745 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-01 18:52:53,281 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-01 18:52:53,282 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-01 18:52:53,282 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-01 18:52:53,284 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,293 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-01 18:52:53,312 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,314 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,314 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,315 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,316 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,316 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,317 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,318 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,318 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,319 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,320 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,320 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,321 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,321 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-01 18:52:53,322 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,323 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,323 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,324 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,324 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,325 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,326 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,326 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,327 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,327 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-01 18:52:53,328 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,328 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-01 18:52:53,330 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-01 18:52:53,332 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-01 18:52:53,334 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-01 18:52:53,334 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-01 18:52:53,335 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-01 18:52:53,336 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-01 18:52:53,344 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-01 18:52:53,347 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-01 18:52:53,348 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-01 18:52:53,349 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-01 18:52:53,349 main DEBUG createAppenders(={Console}) 2024-12-01 18:52:53,350 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-01 18:52:53,350 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-01 18:52:53,350 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-01 18:52:53,351 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-01 18:52:53,351 main DEBUG OutputStream closed 2024-12-01 18:52:53,351 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-01 18:52:53,351 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-01 18:52:53,351 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-01 18:52:53,416 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-01 18:52:53,418 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-01 18:52:53,419 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-01 18:52:53,420 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-01 18:52:53,421 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-01 18:52:53,421 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-01 18:52:53,421 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-01 18:52:53,422 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-01 18:52:53,422 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-01 18:52:53,422 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-01 18:52:53,422 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-01 18:52:53,423 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-01 18:52:53,423 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-01 18:52:53,423 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-01 18:52:53,423 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-01 18:52:53,424 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-01 18:52:53,424 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-01 18:52:53,425 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-01 18:52:53,427 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-01 18:52:53,427 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-01 18:52:53,427 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-01 18:52:53,428 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-01T18:52:53,444 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-01 18:52:53,446 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-01 18:52:53,447 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-01T18:52:53,693 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c 2024-12-01T18:52:53,717 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4, deleteOnExit=true 2024-12-01T18:52:53,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/test.cache.data in system properties and HBase conf 2024-12-01T18:52:53,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T18:52:53,719 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.log.dir in system properties and HBase conf 2024-12-01T18:52:53,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T18:52:53,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T18:52:53,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-01T18:52:53,809 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-01T18:52:53,906 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-01T18:52:53,909 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:52:53,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:52:53,910 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T18:52:53,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:52:53,911 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T18:52:53,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T18:52:53,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:52:53,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:52:53,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T18:52:53,913 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/nfs.dump.dir in system properties and HBase conf 2024-12-01T18:52:53,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/java.io.tmpdir in system properties and HBase conf 2024-12-01T18:52:53,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:52:53,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T18:52:53,915 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T18:52:54,726 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-01T18:52:54,813 INFO [Time-limited test {}] log.Log(170): Logging initialized @2219ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-01T18:52:54,903 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:52:54,965 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:52:54,988 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:52:54,989 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:52:54,991 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:52:55,005 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:52:55,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27f57d6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:52:55,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@410292bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:52:55,195 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2ea2dca6{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/java.io.tmpdir/jetty-localhost-44711-hadoop-hdfs-3_4_1-tests_jar-_-any-8078270483694728207/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:52:55,202 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ed9b238{HTTP/1.1, (http/1.1)}{localhost:44711} 2024-12-01T18:52:55,202 INFO [Time-limited test {}] server.Server(415): Started @2609ms 2024-12-01T18:52:55,607 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:52:55,614 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:52:55,615 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:52:55,615 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:52:55,616 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:52:55,617 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d0819de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:52:55,617 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54f91ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:52:55,745 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d1a7cf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/java.io.tmpdir/jetty-localhost-43215-hadoop-hdfs-3_4_1-tests_jar-_-any-433631329156577873/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:52:55,746 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@20b70ca3{HTTP/1.1, (http/1.1)}{localhost:43215} 2024-12-01T18:52:55,746 INFO [Time-limited test {}] server.Server(415): Started @3153ms 2024-12-01T18:52:55,804 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:52:55,934 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:52:55,941 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:52:55,945 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:52:55,946 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:52:55,946 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:52:55,949 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37e44dc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:52:55,950 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1fb4f3a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:52:56,087 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1548acd1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/java.io.tmpdir/jetty-localhost-44863-hadoop-hdfs-3_4_1-tests_jar-_-any-13008916395375315702/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:52:56,088 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3dc20694{HTTP/1.1, (http/1.1)}{localhost:44863} 2024-12-01T18:52:56,088 INFO [Time-limited test {}] server.Server(415): Started @3495ms 2024-12-01T18:52:56,091 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:52:56,138 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:52:56,143 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:52:56,146 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:52:56,147 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:52:56,147 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T18:52:56,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@656f7043{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:52:56,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17c0da3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:52:56,261 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data2/current/BP-1910344294-172.17.0.2-1733079174471/current, will proceed with Du for space computation calculation, 2024-12-01T18:52:56,261 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data4/current/BP-1910344294-172.17.0.2-1733079174471/current, will proceed with Du for space computation calculation, 2024-12-01T18:52:56,261 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data1/current/BP-1910344294-172.17.0.2-1733079174471/current, will proceed with Du for space computation calculation, 2024-12-01T18:52:56,261 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data3/current/BP-1910344294-172.17.0.2-1733079174471/current, will proceed with Du for space computation calculation, 2024-12-01T18:52:56,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3297a183{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/java.io.tmpdir/jetty-localhost-34133-hadoop-hdfs-3_4_1-tests_jar-_-any-1443741470976163956/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:52:56,285 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2b8a83a2{HTTP/1.1, (http/1.1)}{localhost:34133} 
2024-12-01T18:52:56,286 INFO [Time-limited test {}] server.Server(415): Started @3693ms 2024-12-01T18:52:56,288 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:52:56,306 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:52:56,306 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:52:56,379 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6fb0c39e9d0dc59 with lease ID 0xb21b319c47a42f79: Processing first storage report for DS-6f54506a-1cb6-48e6-af67-e3a4d0391ff5 from datanode DatanodeRegistration(127.0.0.1:33515, datanodeUuid=de2c5c9c-e165-4965-9dec-85edab90846d, infoPort=46233, infoSecurePort=0, ipcPort=34403, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471) 2024-12-01T18:52:56,381 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6fb0c39e9d0dc59 with lease ID 0xb21b319c47a42f79: from storage DS-6f54506a-1cb6-48e6-af67-e3a4d0391ff5 node DatanodeRegistration(127.0.0.1:33515, datanodeUuid=de2c5c9c-e165-4965-9dec-85edab90846d, infoPort=46233, infoSecurePort=0, ipcPort=34403, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T18:52:56,381 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa0c442dbe35a8192 with lease ID 0xb21b319c47a42f78: Processing first storage report for DS-1348173a-e9d0-4d8c-ba63-313fe29dc5a3 from datanode DatanodeRegistration(127.0.0.1:46831, datanodeUuid=28ebedd3-97f8-40f2-9e60-8e3491545b48, infoPort=41183, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471) 2024-12-01T18:52:56,382 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa0c442dbe35a8192 with lease ID 0xb21b319c47a42f78: from storage DS-1348173a-e9d0-4d8c-ba63-313fe29dc5a3 node DatanodeRegistration(127.0.0.1:46831, datanodeUuid=28ebedd3-97f8-40f2-9e60-8e3491545b48, infoPort=41183, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:52:56,382 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc6fb0c39e9d0dc59 with lease ID 0xb21b319c47a42f79: Processing first storage report for DS-505ea74e-c9f8-4e90-b4a4-fa3ff1e77897 from datanode DatanodeRegistration(127.0.0.1:33515, datanodeUuid=de2c5c9c-e165-4965-9dec-85edab90846d, infoPort=46233, infoSecurePort=0, ipcPort=34403, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471) 2024-12-01T18:52:56,382 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc6fb0c39e9d0dc59 with lease ID 0xb21b319c47a42f79: from storage DS-505ea74e-c9f8-4e90-b4a4-fa3ff1e77897 node DatanodeRegistration(127.0.0.1:33515, datanodeUuid=de2c5c9c-e165-4965-9dec-85edab90846d, infoPort=46233, infoSecurePort=0, ipcPort=34403, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, 
invalidatedBlocks: 0 2024-12-01T18:52:56,383 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa0c442dbe35a8192 with lease ID 0xb21b319c47a42f78: Processing first storage report for DS-be860442-83e1-4ce2-aeb1-c2b12b2d08e4 from datanode DatanodeRegistration(127.0.0.1:46831, datanodeUuid=28ebedd3-97f8-40f2-9e60-8e3491545b48, infoPort=41183, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471) 2024-12-01T18:52:56,383 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa0c442dbe35a8192 with lease ID 0xb21b319c47a42f78: from storage DS-be860442-83e1-4ce2-aeb1-c2b12b2d08e4 node DatanodeRegistration(127.0.0.1:46831, datanodeUuid=28ebedd3-97f8-40f2-9e60-8e3491545b48, infoPort=41183, infoSecurePort=0, ipcPort=38463, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T18:52:56,445 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data5/current/BP-1910344294-172.17.0.2-1733079174471/current, will proceed with Du for space computation calculation, 2024-12-01T18:52:56,445 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data6/current/BP-1910344294-172.17.0.2-1733079174471/current, will proceed with Du for space computation calculation, 2024-12-01T18:52:56,473 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:52:56,481 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd7d93658fdcc676 with lease ID 0xb21b319c47a42f7a: Processing first storage report for DS-b6d017bb-2401-4aaf-b99b-1290247b2597 from datanode DatanodeRegistration(127.0.0.1:46223, datanodeUuid=0c5e389f-64bb-4742-a919-e3da733d550b, infoPort=45547, infoSecurePort=0, ipcPort=39139, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471) 2024-12-01T18:52:56,481 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd7d93658fdcc676 with lease ID 0xb21b319c47a42f7a: from storage DS-b6d017bb-2401-4aaf-b99b-1290247b2597 node DatanodeRegistration(127.0.0.1:46223, datanodeUuid=0c5e389f-64bb-4742-a919-e3da733d550b, infoPort=45547, infoSecurePort=0, ipcPort=39139, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T18:52:56,481 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd7d93658fdcc676 with lease ID 0xb21b319c47a42f7a: Processing first storage report for DS-d539f89f-fe0e-45c6-b1c7-5997941bbfb8 from datanode DatanodeRegistration(127.0.0.1:46223, datanodeUuid=0c5e389f-64bb-4742-a919-e3da733d550b, infoPort=45547, infoSecurePort=0, ipcPort=39139, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471) 2024-12-01T18:52:56,482 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd7d93658fdcc676 with lease ID 0xb21b319c47a42f7a: from storage DS-d539f89f-fe0e-45c6-b1c7-5997941bbfb8 node DatanodeRegistration(127.0.0.1:46223, datanodeUuid=0c5e389f-64bb-4742-a919-e3da733d550b, infoPort=45547, infoSecurePort=0, ipcPort=39139, storageInfo=lv=-57;cid=testClusterID;nsid=1920369228;c=1733079174471), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-01T18:52:56,685 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c 2024-12-01T18:52:56,760 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable
2024-12-01T18:52:56,814 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=160, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=131, ProcessCount=11, AvailableMemoryMB=4639
2024-12-01T18:52:56,817 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-01T18:52:56,826 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-01T18:52:56,910 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/zookeeper_0, clientPort=53882, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-01T18:52:56,920 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=53882
2024-12-01T18:52:56,934 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T18:52:56,938 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-01T18:52:57,017 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-01T18:52:57,018 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-01T18:52:57,071 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:44598 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:33515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44598 dst: /127.0.0.1:33515
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-01T18:52:57,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-01T18:52:57,489 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-01T18:52:57,498 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a with version=8
2024-12-01T18:52:57,499 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/hbase-staging
2024-12-01T18:52:57,595 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-01T18:52:57,842 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9ec37ae3355e:0 server-side Connection retries=45
2024-12-01T18:52:57,853 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-01T18:52:57,854 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-01T18:52:57,858 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-01T18:52:57,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-01T18:52:57,859 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-01T18:52:57,999 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-01T18:52:58,070 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-01T18:52:58,079 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-01T18:52:58,083 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:52:58,111 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 8467 (auto-detected) 2024-12-01T18:52:58,112 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-01T18:52:58,131 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35907 2024-12-01T18:52:58,153 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35907 connecting to ZooKeeper ensemble=127.0.0.1:53882 2024-12-01T18:52:58,186 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:359070x0, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:52:58,188 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35907-0x1016f5f814c0000 connected 2024-12-01T18:52:58,214 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:52:58,217 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:52:58,229 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:52:58,233 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a, hbase.cluster.distributed=false 2024-12-01T18:52:58,257 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:52:58,262 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35907 2024-12-01T18:52:58,264 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35907 2024-12-01T18:52:58,265 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35907 2024-12-01T18:52:58,265 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35907 2024-12-01T18:52:58,266 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35907 2024-12-01T18:52:58,377 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9ec37ae3355e:0 server-side Connection retries=45 2024-12-01T18:52:58,378 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:52:58,379 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:52:58,379 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:52:58,379 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:52:58,380 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:52:58,383 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:52:58,385 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:52:58,386 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39281 2024-12-01T18:52:58,388 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39281 connecting to ZooKeeper ensemble=127.0.0.1:53882 2024-12-01T18:52:58,389 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:52:58,393 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:52:58,401 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:392810x0, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:52:58,402 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39281-0x1016f5f814c0001 connected 2024-12-01T18:52:58,402 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:52:58,406 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:52:58,415 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:52:58,417 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:52:58,422 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:52:58,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39281 2024-12-01T18:52:58,424 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=39281 2024-12-01T18:52:58,424 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39281 2024-12-01T18:52:58,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39281 2024-12-01T18:52:58,425 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39281 2024-12-01T18:52:58,442 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9ec37ae3355e:0 server-side Connection retries=45 2024-12-01T18:52:58,443 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:52:58,443 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:52:58,443 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:52:58,443 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:52:58,444 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:52:58,444 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:52:58,444 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:52:58,445 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40531 2024-12-01T18:52:58,446 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40531 connecting to ZooKeeper ensemble=127.0.0.1:53882 2024-12-01T18:52:58,447 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:52:58,450 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:52:58,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:405310x0, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:52:58,458 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:405310x0, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:52:58,458 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40531-0x1016f5f814c0002 connected 2024-12-01T18:52:58,458 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, 
blockSize=64 KB 2024-12-01T18:52:58,459 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:52:58,460 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:52:58,462 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:52:58,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40531 2024-12-01T18:52:58,465 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40531 2024-12-01T18:52:58,465 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40531 2024-12-01T18:52:58,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40531 2024-12-01T18:52:58,466 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40531 2024-12-01T18:52:58,484 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9ec37ae3355e:0 server-side Connection retries=45 2024-12-01T18:52:58,484 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:52:58,484 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:52:58,484 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:52:58,484 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:52:58,484 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:52:58,484 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:52:58,485 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:52:58,485 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43445 2024-12-01T18:52:58,487 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43445 connecting to ZooKeeper ensemble=127.0.0.1:53882 2024-12-01T18:52:58,488 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:52:58,491 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:52:58,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:434450x0, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:52:58,496 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43445-0x1016f5f814c0003 connected 2024-12-01T18:52:58,496 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:52:58,496 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:52:58,497 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:52:58,498 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:52:58,500 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:52:58,501 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43445 2024-12-01T18:52:58,501 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43445 2024-12-01T18:52:58,502 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43445 2024-12-01T18:52:58,503 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43445 2024-12-01T18:52:58,503 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43445 2024-12-01T18:52:58,519 DEBUG [M:0;9ec37ae3355e:35907 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9ec37ae3355e:35907 2024-12-01T18:52:58,520 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9ec37ae3355e,35907,1733079177647 2024-12-01T18:52:58,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:52:58,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:52:58,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
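The RpcExecutor lines above ("Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3", then "Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo ...") describe a simple shape: one bounded FIFO call queue drained by a fixed set of handler threads. The sketch below only illustrates that shape with plain JDK classes; it is not the actual org.apache.hadoop.hbase.ipc.RpcExecutor, and the class name and printed messages are invented for the example.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;

public class FifoCallQueueSketch {
  public static void main(String[] args) throws Exception {
    // One bounded FIFO call queue: maxQueueLength=30, as in the log above.
    BlockingQueue<Runnable> callQueue = new LinkedBlockingQueue<>(30);

    // handlerCount=3 handler threads block on the queue and run calls in arrival order.
    for (int i = 0; i < 3; i++) {
      Thread handler = new Thread(() -> {
        try {
          while (true) {
            callQueue.take().run();
          }
        } catch (InterruptedException e) {
          Thread.currentThread().interrupt();
        }
      }, "default.FPBQ.Fifo.handler=" + i); // thread prefix mirrors the log
      handler.setDaemon(true);
      handler.start();
    }

    // offer() fails fast once 30 calls are queued, which is how a bounded call
    // queue pushes back on callers instead of growing without limit.
    boolean accepted = callQueue.offer(() -> System.out.println("handled one call"));
    System.out.println("call accepted: " + accepted);
    Thread.sleep(200); // give a daemon handler a moment to run the queued call
  }
}

The bounded queue plus fixed handler pool is the reason the log prints both a maxQueueLength and a handlerCount for every executor: the first caps queued work, the second caps concurrent work.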
2024-12-01T18:52:58,527 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:52:58,529 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9ec37ae3355e,35907,1733079177647 2024-12-01T18:52:58,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:52:58,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:52:58,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:52:58,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:58,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:58,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:58,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:58,555 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:52:58,556 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9ec37ae3355e,35907,1733079177647 from backup master directory 2024-12-01T18:52:58,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9ec37ae3355e,35907,1733079177647 2024-12-01T18:52:58,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:52:58,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
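The ZKUtil/ZKWatcher traffic above follows one pattern: each process calls exists() on a znode such as /hbase/master before it exists ("Set watcher on znode that does not yet exist"), and later every watcher receives a NodeCreated event once the active master registers. A minimal sketch of that pattern with the plain Apache ZooKeeper client follows; the connect string and the /demo-master path are placeholders, not the test's 127.0.0.1:53882 ensemble or the real /hbase/master znode.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class WatchAbsentZnodeSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    String path = "/demo-master"; // hypothetical stand-in for /hbase/master

    // "Set watcher on znode that does not yet exist": exists() returns null here,
    // but it still leaves a one-shot watch behind on the absent path.
    zk.exists(path, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("NodeCreated fired for " + event.getPath());
      }
    });

    // When the "active master" creates the znode, every registered watcher is
    // notified, which is the NodeCreated traffic visible in the log above.
    zk.create(path, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    Thread.sleep(500);
    zk.close();
  }
}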
2024-12-01T18:52:58,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:52:58,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:52:58,560 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T18:52:58,560 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9ec37ae3355e,35907,1733079177647 2024-12-01T18:52:58,562 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-01T18:52:58,563 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-01T18:52:58,626 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/hbase.id] with ID: 613ede39-e2e7-4ea9-abf9-7a7778fbdd41 2024-12-01T18:52:58,627 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/.tmp/hbase.id 2024-12-01T18:52:58,633 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:58,634 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:58,637 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:37400 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:46831:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37400 dst: /127.0.0.1:46831 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:52:58,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-01T18:52:58,643 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T18:52:58,644 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/.tmp/hbase.id]:[hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/hbase.id] 2024-12-01T18:52:58,685 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:52:58,689 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-01T18:52:58,707 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 17ms. 2024-12-01T18:52:58,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:58,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:58,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:58,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:58,722 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:58,723 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:58,726 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:47048 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:46223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47048 dst: /127.0.0.1:46223 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:52:58,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-01T18:52:58,732 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
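The repeated DFSStripedOutputStream warnings above mean the writes are falling under the RS-3-2-1024k erasure-coding policy, which needs five datanodes (three data plus two parity) while this mini-cluster has fewer, so parity blocks at indices 3 and 4 cannot be placed; the log itself points at 'hdfs ec -verifyClusterSetup' for confirming that. The sketch below shows, under the assumption that one simply wants plain replication for a test directory, how the effective policy could be inspected and cleared through the DistributedFileSystem API; the URI and path are placeholders for this example.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class ErasureCodingPolicyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalStateException("not an HDFS filesystem");
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;

    Path dir = new Path("/user/jenkins/test-data"); // hypothetical directory
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
    System.out.println("effective EC policy: "
        + (policy == null ? "replication" : policy.getName()));

    // Falling back to replication avoids the parity-allocation warnings when the
    // cluster is smaller than the policy requires. Note that unset must target the
    // directory where the policy was explicitly set, not a child that inherits it.
    if (policy != null) {
      dfs.unsetErasureCodingPolicy(dir);
    }
  }
}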
2024-12-01T18:52:58,747 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:52:58,749 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T18:52:58,754 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T18:52:58,781 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:58,781 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:58,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:47076 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:46223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47076 dst: /127.0.0.1:46223 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:52:58,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-01T18:52:58,790 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T18:52:58,808 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store 2024-12-01T18:52:58,823 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:58,823 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:58,827 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:37424 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:46831:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37424 dst: /127.0.0.1:46831 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:52:58,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-01T18:52:58,832 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T18:52:58,837 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-01T18:52:58,839 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:52:58,840 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:52:58,840 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:52:58,841 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:52:58,842 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-01T18:52:58,842 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:52:58,842 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:52:58,843 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733079178840Disabling compacts and flushes for region at 1733079178840Disabling writes for close at 1733079178842 (+2 ms)Writing region close event to WAL at 1733079178842Closed at 1733079178842 2024-12-01T18:52:58,845 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/.initializing 2024-12-01T18:52:58,845 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/WALs/9ec37ae3355e,35907,1733079177647 2024-12-01T18:52:58,854 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T18:52:58,867 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9ec37ae3355e%2C35907%2C1733079177647, suffix=, logDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/WALs/9ec37ae3355e,35907,1733079177647, archiveDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/oldWALs, maxLogs=10 2024-12-01T18:52:58,895 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/WALs/9ec37ae3355e,35907,1733079177647/9ec37ae3355e%2C35907%2C1733079177647.1733079178871, exclude list is [], retry=0 2024-12-01T18:52:58,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 

at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:52:58,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46223,DS-b6d017bb-2401-4aaf-b99b-1290247b2597,DISK] 2024-12-01T18:52:58,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33515,DS-6f54506a-1cb6-48e6-af67-e3a4d0391ff5,DISK] 2024-12-01T18:52:58,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46831,DS-1348173a-e9d0-4d8c-ba63-313fe29dc5a3,DISK] 2024-12-01T18:52:58,919 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-01T18:52:58,959 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/WALs/9ec37ae3355e,35907,1733079177647/9ec37ae3355e%2C35907%2C1733079177647.1733079178871 2024-12-01T18:52:58,960 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45547:45547),(127.0.0.1/127.0.0.1:46233:46233),(127.0.0.1/127.0.0.1:41183:41183)] 2024-12-01T18:52:58,960 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:52:58,961 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:52:58,964 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:58,965 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:59,003 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:59,030 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T18:52:59,034 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:52:59,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:52:59,037 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:59,040 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T18:52:59,041 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:52:59,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:52:59,042 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:59,045 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T18:52:59,045 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:52:59,046 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:52:59,046 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:59,048 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T18:52:59,049 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:52:59,049 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:52:59,050 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:59,053 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:59,054 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:59,060 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:59,060 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:59,064 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T18:52:59,067 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:52:59,073 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:52:59,074 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58884225, jitterRate=-0.122556671500206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:52:59,080 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733079178978Initializing all the Stores at 1733079178980 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079178980Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079178981 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079178981Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079178981Cleaning up temporary data from old regions at 1733079179060 (+79 ms)Region opened successfully at 1733079179080 (+20 ms) 2024-12-01T18:52:59,081 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T18:52:59,119 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ddfbac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9ec37ae3355e/172.17.0.2:0 2024-12-01T18:52:59,152 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-01T18:52:59,164 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T18:52:59,164 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T18:52:59,167 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T18:52:59,169 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-01T18:52:59,174 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-12-01T18:52:59,174 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T18:52:59,201 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-01T18:52:59,210 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T18:52:59,212 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-01T18:52:59,215 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T18:52:59,217 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T18:52:59,219 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-01T18:52:59,222 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T18:52:59,226 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T18:52:59,228 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-01T18:52:59,229 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T18:52:59,231 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T18:52:59,249 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T18:52:59,250 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T18:52:59,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:52:59,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:52:59,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:52:59,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:52:59,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:59,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:59,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:59,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:59,263 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9ec37ae3355e,35907,1733079177647, sessionid=0x1016f5f814c0000, setting cluster-up flag (Was=false) 2024-12-01T18:52:59,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:59,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:59,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
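Several of the startup messages in this sequence quote their tuning keys directly: hbase.normalizer.merge.min_region_size.mb in the SimpleRegionNormalizer line above, hbase.region.store.parallel.put.limit in the earlier StoreHotnessProtector line, and hbase.hregion.percolumnfamilyflush.size.lower.bound in the FlushLargeStoresPolicy line. The sketch below simply sets those same keys on an HBase Configuration; the values are illustrative only, and in a real deployment they would normally live in hbase-site.xml rather than in code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StartupTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // SimpleRegionNormalizer: minimum region size (MB) considered for merges;
    // the log shows this being bumped from 0 to 1 at startup.
    conf.setInt("hbase.normalizer.merge.min_region_size.mb", 1);

    // StoreHotnessProtector is disabled at 0; any value > 0 enables it, as the
    // warning earlier in this log points out.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);

    // FlushLargeStoresPolicy lower bound in bytes; the log derived 32 MB as the
    // 128 MB memstore flush size divided by the 4 column families of master:store.
    conf.setLong("hbase.hregion.percolumnfamilyflush.size.lower.bound", 32L * 1024 * 1024);

    System.out.println(conf.get("hbase.normalizer.merge.min_region_size.mb"));
  }
}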
2024-12-01T18:52:59,275 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:59,281 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T18:52:59,283 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9ec37ae3355e,35907,1733079177647 2024-12-01T18:52:59,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:59,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:59,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:59,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:52:59,295 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T18:52:59,296 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9ec37ae3355e,35907,1733079177647 2024-12-01T18:52:59,302 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-01T18:52:59,307 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(746): ClusterId : 613ede39-e2e7-4ea9-abf9-7a7778fbdd41 2024-12-01T18:52:59,307 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(746): ClusterId : 613ede39-e2e7-4ea9-abf9-7a7778fbdd41 2024-12-01T18:52:59,308 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(746): ClusterId : 613ede39-e2e7-4ea9-abf9-7a7778fbdd41 2024-12-01T18:52:59,310 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:52:59,310 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:52:59,310 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:52:59,315 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:52:59,315 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-12-01T18:52:59,315 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:52:59,315 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:52:59,315 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:52:59,315 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:52:59,320 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:52:59,320 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:52:59,320 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:52:59,321 DEBUG [RS:0;9ec37ae3355e:39281 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e3e1342, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9ec37ae3355e/172.17.0.2:0 2024-12-01T18:52:59,321 DEBUG [RS:2;9ec37ae3355e:43445 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d46ec9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9ec37ae3355e/172.17.0.2:0 2024-12-01T18:52:59,321 DEBUG [RS:1;9ec37ae3355e:40531 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5277996, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9ec37ae3355e/172.17.0.2:0 2024-12-01T18:52:59,340 DEBUG [RS:0;9ec37ae3355e:39281 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9ec37ae3355e:39281 2024-12-01T18:52:59,344 DEBUG [RS:2;9ec37ae3355e:43445 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;9ec37ae3355e:43445 2024-12-01T18:52:59,344 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T18:52:59,344 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T18:52:59,344 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T18:52:59,344 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T18:52:59,344 DEBUG [RS:1;9ec37ae3355e:40531 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;9ec37ae3355e:40531 2024-12-01T18:52:59,345 DEBUG [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T18:52:59,345 DEBUG [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-01T18:52:59,345 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T18:52:59,345 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T18:52:59,345 DEBUG [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T18:52:59,348 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(2659): reportForDuty to master=9ec37ae3355e,35907,1733079177647 with port=39281, startcode=1733079178336 2024-12-01T18:52:59,348 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(2659): reportForDuty to master=9ec37ae3355e,35907,1733079177647 with port=40531, startcode=1733079178442 2024-12-01T18:52:59,348 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(2659): reportForDuty to master=9ec37ae3355e,35907,1733079177647 with port=43445, startcode=1733079178483 2024-12-01T18:52:59,361 DEBUG [RS:1;9ec37ae3355e:40531 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:52:59,361 DEBUG [RS:0;9ec37ae3355e:39281 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:52:59,362 DEBUG [RS:2;9ec37ae3355e:43445 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:52:59,401 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41229, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:52:59,401 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48439, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:52:59,401 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45933, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:52:59,402 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-01T18:52:59,407 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35907 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-01T18:52:59,412 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35907 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-01T18:52:59,413 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35907 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-01T18:52:59,415 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-01T18:52:59,424 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-01T18:52:59,433 DEBUG [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-01T18:52:59,433 DEBUG [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-01T18:52:59,433 DEBUG [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-01T18:52:59,433 WARN [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-01T18:52:59,433 WARN [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-01T18:52:59,433 WARN [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-01T18:52:59,431 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9ec37ae3355e,35907,1733079177647 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T18:52:59,439 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9ec37ae3355e:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:52:59,439 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9ec37ae3355e:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:52:59,440 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9ec37ae3355e:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:52:59,440 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9ec37ae3355e:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:52:59,440 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9ec37ae3355e:0, corePoolSize=10, maxPoolSize=10 2024-12-01T18:52:59,440 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,440 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9ec37ae3355e:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:52:59,441 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,442 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733079209442 2024-12-01T18:52:59,443 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T18:52:59,444 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T18:52:59,447 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:52:59,447 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-01T18:52:59,448 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T18:52:59,449 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T18:52:59,449 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T18:52:59,449 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T18:52:59,450 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,452 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T18:52:59,453 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T18:52:59,454 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T18:52:59,454 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:52:59,455 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:52:59,456 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T18:52:59,456 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T18:52:59,458 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.large.0-1733079179457,5,FailOnTimeoutGroup] 2024-12-01T18:52:59,458 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.small.0-1733079179458,5,FailOnTimeoutGroup] 2024-12-01T18:52:59,459 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,459 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T18:52:59,460 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,461 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,466 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:59,466 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:59,470 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:44636 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:33515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44636 dst: /127.0.0.1:33515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:52:59,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-01T18:52:59,475 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. 
It's at high risk of losing data. 2024-12-01T18:52:59,476 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-01T18:52:59,477 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a 2024-12-01T18:52:59,494 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:59,494 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:52:59,499 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:47094 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:46223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47094 dst: /127.0.0.1:46223 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:52:59,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-01T18:52:59,509 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T18:52:59,510 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:52:59,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-01T18:52:59,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-01T18:52:59,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:52:59,516 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:52:59,516 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:52:59,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:52:59,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T18:52:59,520 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T18:52:59,520 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:52:59,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:52:59,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:52:59,523 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:52:59,523 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:52:59,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:52:59,524 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:52:59,526 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:52:59,527 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:52:59,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:52:59,528 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T18:52:59,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740 2024-12-01T18:52:59,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740 2024-12-01T18:52:59,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T18:52:59,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T18:52:59,533 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-01T18:52:59,535 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(2659): reportForDuty to master=9ec37ae3355e,35907,1733079177647 with port=39281, startcode=1733079178336 2024-12-01T18:52:59,535 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(2659): reportForDuty to master=9ec37ae3355e,35907,1733079177647 with port=40531, startcode=1733079178442 2024-12-01T18:52:59,535 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(2659): reportForDuty to master=9ec37ae3355e,35907,1733079177647 with port=43445, startcode=1733079178483 2024-12-01T18:52:59,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T18:52:59,537 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35907 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9ec37ae3355e,43445,1733079178483 2024-12-01T18:52:59,540 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35907 {}] master.ServerManager(517): Registering regionserver=9ec37ae3355e,43445,1733079178483 2024-12-01T18:52:59,548 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:52:59,548 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35907 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9ec37ae3355e,39281,1733079178336 2024-12-01T18:52:59,549 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35907 {}] master.ServerManager(517): Registering regionserver=9ec37ae3355e,39281,1733079178336 2024-12-01T18:52:59,549 DEBUG [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a 2024-12-01T18:52:59,549 DEBUG [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33839 2024-12-01T18:52:59,549 DEBUG [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T18:52:59,549 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58762393, jitterRate=-0.12437210977077484}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:52:59,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733079179510Initializing all the Stores at 1733079179513 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079179513Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079179513Instantiating store for column family {NAME => 'rep_barrier', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079179513Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079179513Cleaning up temporary data from old regions at 1733079179532 (+19 ms)Region opened successfully at 1733079179553 (+21 ms) 2024-12-01T18:52:59,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:52:59,554 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T18:52:59,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T18:52:59,554 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:52:59,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:52:59,554 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:52:59,555 DEBUG [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a 2024-12-01T18:52:59,555 DEBUG [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33839 2024-12-01T18:52:59,555 DEBUG [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T18:52:59,555 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35907 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9ec37ae3355e,40531,1733079178442 2024-12-01T18:52:59,555 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35907 {}] master.ServerManager(517): Registering regionserver=9ec37ae3355e,40531,1733079178442 2024-12-01T18:52:59,555 DEBUG [RS:2;9ec37ae3355e:43445 {}] zookeeper.ZKUtil(111): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9ec37ae3355e,43445,1733079178483 2024-12-01T18:52:59,555 WARN [RS:2;9ec37ae3355e:43445 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T18:52:59,556 INFO [RS:2;9ec37ae3355e:43445 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T18:52:59,556 DEBUG [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,43445,1733079178483 2024-12-01T18:52:59,557 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T18:52:59,557 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733079179554Disabling compacts and flushes for region at 1733079179554Disabling writes for close at 1733079179554Writing region close event to WAL at 1733079179556 (+2 ms)Closed at 1733079179557 (+1 ms) 2024-12-01T18:52:59,559 DEBUG [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a 2024-12-01T18:52:59,559 DEBUG [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33839 2024-12-01T18:52:59,560 DEBUG [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T18:52:59,562 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:52:59,562 DEBUG [RS:0;9ec37ae3355e:39281 {}] zookeeper.ZKUtil(111): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9ec37ae3355e,39281,1733079178336 2024-12-01T18:52:59,562 WARN [RS:0;9ec37ae3355e:39281 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T18:52:59,563 INFO [RS:0;9ec37ae3355e:39281 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T18:52:59,563 DEBUG [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,39281,1733079178336 2024-12-01T18:52:59,563 DEBUG [RS:1;9ec37ae3355e:40531 {}] zookeeper.ZKUtil(111): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9ec37ae3355e,40531,1733079178442 2024-12-01T18:52:59,563 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9ec37ae3355e,39281,1733079178336] 2024-12-01T18:52:59,563 WARN [RS:1;9ec37ae3355e:40531 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T18:52:59,563 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9ec37ae3355e,43445,1733079178483] 2024-12-01T18:52:59,563 INFO [RS:1;9ec37ae3355e:40531 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T18:52:59,563 DEBUG [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,40531,1733079178442 2024-12-01T18:52:59,564 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9ec37ae3355e,40531,1733079178442] 2024-12-01T18:52:59,564 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:52:59,565 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-01T18:52:59,572 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T18:52:59,582 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T18:52:59,586 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T18:52:59,589 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:52:59,590 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:52:59,590 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:52:59,604 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:52:59,604 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:52:59,604 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:52:59,610 INFO [RS:0;9ec37ae3355e:39281 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:52:59,610 INFO [RS:1;9ec37ae3355e:40531 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:52:59,610 INFO [RS:2;9ec37ae3355e:43445 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 
100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:52:59,610 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,610 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,610 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,613 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T18:52:59,613 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T18:52:59,615 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T18:52:59,620 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T18:52:59,620 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T18:52:59,620 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T18:52:59,621 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,621 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,621 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-01T18:52:59,622 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9ec37ae3355e:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:52:59,622 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,622 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9ec37ae3355e:0, corePoolSize=2, maxPoolSize=2 
2024-12-01T18:52:59,623 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9ec37ae3355e:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:52:59,623 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9ec37ae3355e:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:52:59,623 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:2;9ec37ae3355e:43445 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:52:59,623 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,623 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,624 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,624 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,624 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor 
service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,624 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,624 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9ec37ae3355e:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:52:59,624 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:52:59,624 DEBUG [RS:0;9ec37ae3355e:39281 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:52:59,624 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9ec37ae3355e:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:52:59,624 DEBUG [RS:1;9ec37ae3355e:40531 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:52:59,628 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,628 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,628 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,629 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,629 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,629 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,629 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,629 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,629 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,39281,1733079178336-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:52:59,629 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,629 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,629 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,43445,1733079178483-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-01T18:52:59,631 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,631 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,631 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,631 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,631 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,631 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,40531,1733079178442-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:52:59,651 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:52:59,653 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,43445,1733079178483-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,654 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,654 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.Replication(171): 9ec37ae3355e,43445,1733079178483 started 2024-12-01T18:52:59,655 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:52:59,656 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,39281,1733079178336-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,656 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,656 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.Replication(171): 9ec37ae3355e,39281,1733079178336 started 2024-12-01T18:52:59,657 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:52:59,657 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,40531,1733079178442-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,657 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,658 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.Replication(171): 9ec37ae3355e,40531,1733079178442 started 2024-12-01T18:52:59,672 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
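The chore registrations above (CompactionChecker, MemstoreFlusherChore, ExecutorStatusChore, nonceCleaner, BrokenStoreFileCleaner, MobFileCleanerChore, HeapMemoryTunerChore, ReplicationSourceStatistics) all go through ChoreService.scheduleChore(), which is what prints the "Chore ScheduledChore name=..., period=..., unit=... is enabled." lines. The sketch below shows how such a periodic task is typically defined and scheduled against the public ScheduledChore/ChoreService API; the class names and the trivial Stoppable owner are illustrative, and the exact constructor signatures may differ slightly in this 4.0.0-alpha-1-SNAPSHOT build.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public final class ChoreSketch {
      // A trivial Stoppable owner; real chores are stopped together with their region server.
      static final class Owner implements Stoppable {
        private volatile boolean stopped;
        @Override public void stop(String why) { stopped = true; }
        @Override public boolean isStopped() { return stopped; }
      }

      // A periodic task comparable to CompactionChecker above (period = 1000 ms).
      static final class ExampleChecker extends ScheduledChore {
        ExampleChecker(Stoppable stopper) {
          super("ExampleChecker", stopper, 1000); // name, stopper, period in milliseconds
        }
        @Override protected void chore() {
          // Work performed once per period, e.g. checking stores for compaction needs.
        }
      }

      public static void main(String[] args) {
        ChoreService choreService = new ChoreService("sketch");
        // scheduleChore() is the call behind the "Chore ScheduledChore name=... is enabled." lines.
        choreService.scheduleChore(new ExampleChecker(new Owner()));
        choreService.shutdown();
      }
    }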
2024-12-01T18:52:59,673 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(1482): Serving as 9ec37ae3355e,43445,1733079178483, RpcServer on 9ec37ae3355e/172.17.0.2:43445, sessionid=0x1016f5f814c0003 2024-12-01T18:52:59,674 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:52:59,674 DEBUG [RS:2;9ec37ae3355e:43445 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9ec37ae3355e,43445,1733079178483 2024-12-01T18:52:59,674 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,43445,1733079178483' 2024-12-01T18:52:59,674 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:52:59,675 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:52:59,676 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:52:59,676 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:52:59,676 DEBUG [RS:2;9ec37ae3355e:43445 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9ec37ae3355e,43445,1733079178483 2024-12-01T18:52:59,676 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,43445,1733079178483' 2024-12-01T18:52:59,676 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:52:59,677 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:52:59,678 DEBUG [RS:2;9ec37ae3355e:43445 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:52:59,678 INFO [RS:2;9ec37ae3355e:43445 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:52:59,678 INFO [RS:2;9ec37ae3355e:43445 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T18:52:59,680 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
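The flush-table-proc and online-snapshot procedure managers started above are plain ZooKeeper members: each region server registers itself, then watches '/hbase/flush-table-proc/acquired' and '/hbase/online-snapshot/acquired' for new procedures and the matching '/abort' nodes for aborts, which is exactly what the ZKProcedureMemberRpcs lines report. For reference, those znodes can be inspected directly with the stock ZooKeeper client as sketched below; the quorum address is the one printed by ZKWatcher later in this log (127.0.0.1:53882), the session timeout is arbitrary, and the class itself is illustrative only.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public final class ZnodeSketch {
      public static void main(String[] args) throws Exception {
        // Quorum as reported by ZKWatcher in this run; 30s session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53882", 30000, event -> { /* ignore watch events */ });
        try {
          // The paths ZKProcedureMemberRpcs logs when the procedure members start.
          for (String path : new String[] {
              "/hbase/flush-table-proc/acquired", "/hbase/flush-table-proc/abort",
              "/hbase/online-snapshot/acquired", "/hbase/online-snapshot/abort" }) {
            List<String> children = zk.getChildren(path, false);
            System.out.println(path + " -> " + children);
          }
        } finally {
          zk.close();
        }
      }
    }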
2024-12-01T18:52:59,680 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(1482): Serving as 9ec37ae3355e,39281,1733079178336, RpcServer on 9ec37ae3355e/172.17.0.2:39281, sessionid=0x1016f5f814c0001 2024-12-01T18:52:59,681 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:52:59,681 DEBUG [RS:0;9ec37ae3355e:39281 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9ec37ae3355e,39281,1733079178336 2024-12-01T18:52:59,681 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,39281,1733079178336' 2024-12-01T18:52:59,681 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:52:59,681 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:52:59,681 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(1482): Serving as 9ec37ae3355e,40531,1733079178442, RpcServer on 9ec37ae3355e/172.17.0.2:40531, sessionid=0x1016f5f814c0002 2024-12-01T18:52:59,682 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:52:59,682 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:52:59,682 DEBUG [RS:1;9ec37ae3355e:40531 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9ec37ae3355e,40531,1733079178442 2024-12-01T18:52:59,682 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,40531,1733079178442' 2024-12-01T18:52:59,682 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:52:59,682 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:52:59,682 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:52:59,682 DEBUG [RS:0;9ec37ae3355e:39281 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9ec37ae3355e,39281,1733079178336 2024-12-01T18:52:59,683 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,39281,1733079178336' 2024-12-01T18:52:59,683 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:52:59,683 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:52:59,683 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:52:59,683 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:52:59,683 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:52:59,684 DEBUG [RS:1;9ec37ae3355e:40531 {}] 
snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9ec37ae3355e,40531,1733079178442 2024-12-01T18:52:59,684 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,40531,1733079178442' 2024-12-01T18:52:59,684 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:52:59,684 DEBUG [RS:0;9ec37ae3355e:39281 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:52:59,684 INFO [RS:0;9ec37ae3355e:39281 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:52:59,684 INFO [RS:0;9ec37ae3355e:39281 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T18:52:59,684 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:52:59,685 DEBUG [RS:1;9ec37ae3355e:40531 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:52:59,685 INFO [RS:1;9ec37ae3355e:40531 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:52:59,685 INFO [RS:1;9ec37ae3355e:40531 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T18:52:59,737 WARN [9ec37ae3355e:35907 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-01T18:52:59,783 INFO [RS:2;9ec37ae3355e:43445 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T18:52:59,785 INFO [RS:0;9ec37ae3355e:39281 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T18:52:59,786 INFO [RS:1;9ec37ae3355e:40531 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-01T18:52:59,787 INFO [RS:2;9ec37ae3355e:43445 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9ec37ae3355e%2C43445%2C1733079178483, suffix=, logDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,43445,1733079178483, archiveDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/oldWALs, maxLogs=32 2024-12-01T18:52:59,788 INFO [RS:0;9ec37ae3355e:39281 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9ec37ae3355e%2C39281%2C1733079178336, suffix=, logDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,39281,1733079178336, archiveDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/oldWALs, maxLogs=32 2024-12-01T18:52:59,789 INFO [RS:1;9ec37ae3355e:40531 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9ec37ae3355e%2C40531%2C1733079178442, suffix=, logDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,40531,1733079178442, archiveDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/oldWALs, maxLogs=32 2024-12-01T18:52:59,805 DEBUG [RS:2;9ec37ae3355e:43445 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,43445,1733079178483/9ec37ae3355e%2C43445%2C1733079178483.1733079179791, exclude list is [], retry=0 2024-12-01T18:52:59,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33515,DS-6f54506a-1cb6-48e6-af67-e3a4d0391ff5,DISK] 2024-12-01T18:52:59,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46223,DS-b6d017bb-2401-4aaf-b99b-1290247b2597,DISK] 2024-12-01T18:52:59,811 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46831,DS-1348173a-e9d0-4d8c-ba63-313fe29dc5a3,DISK] 2024-12-01T18:52:59,829 DEBUG [RS:0;9ec37ae3355e:39281 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,39281,1733079178336/9ec37ae3355e%2C39281%2C1733079178336.1733079179791, exclude list is [], retry=0 2024-12-01T18:52:59,829 DEBUG [RS:1;9ec37ae3355e:40531 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,40531,1733079178442/9ec37ae3355e%2C40531%2C1733079178442.1733079179791, exclude list is [], retry=0 2024-12-01T18:52:59,833 INFO [RS:2;9ec37ae3355e:43445 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,43445,1733079178483/9ec37ae3355e%2C43445%2C1733079178483.1733079179791 2024-12-01T18:52:59,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33515,DS-6f54506a-1cb6-48e6-af67-e3a4d0391ff5,DISK] 2024-12-01T18:52:59,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33515,DS-6f54506a-1cb6-48e6-af67-e3a4d0391ff5,DISK] 2024-12-01T18:52:59,834 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46831,DS-1348173a-e9d0-4d8c-ba63-313fe29dc5a3,DISK] 2024-12-01T18:52:59,835 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46223,DS-b6d017bb-2401-4aaf-b99b-1290247b2597,DISK] 2024-12-01T18:52:59,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, 
datanodeId = DatanodeInfoWithStorage[127.0.0.1:46831,DS-1348173a-e9d0-4d8c-ba63-313fe29dc5a3,DISK] 2024-12-01T18:52:59,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46223,DS-b6d017bb-2401-4aaf-b99b-1290247b2597,DISK] 2024-12-01T18:52:59,836 DEBUG [RS:2;9ec37ae3355e:43445 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45547:45547),(127.0.0.1/127.0.0.1:46233:46233),(127.0.0.1/127.0.0.1:41183:41183)] 2024-12-01T18:52:59,842 INFO [RS:1;9ec37ae3355e:40531 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,40531,1733079178442/9ec37ae3355e%2C40531%2C1733079178442.1733079179791 2024-12-01T18:52:59,843 DEBUG [RS:1;9ec37ae3355e:40531 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46233:46233),(127.0.0.1/127.0.0.1:41183:41183),(127.0.0.1/127.0.0.1:45547:45547)] 2024-12-01T18:52:59,843 INFO [RS:0;9ec37ae3355e:39281 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,39281,1733079178336/9ec37ae3355e%2C39281%2C1733079178336.1733079179791 2024-12-01T18:52:59,844 DEBUG [RS:0;9ec37ae3355e:39281 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46233:46233),(127.0.0.1/127.0.0.1:45547:45547),(127.0.0.1/127.0.0.1:41183:41183)] 2024-12-01T18:52:59,990 DEBUG [9ec37ae3355e:35907 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-01T18:52:59,998 DEBUG [9ec37ae3355e:35907 {}] balancer.BalancerClusterState(204): Hosts are {9ec37ae3355e=0} racks are {/default-rack=0} 2024-12-01T18:53:00,004 DEBUG [9ec37ae3355e:35907 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T18:53:00,004 DEBUG [9ec37ae3355e:35907 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T18:53:00,004 DEBUG [9ec37ae3355e:35907 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T18:53:00,004 DEBUG [9ec37ae3355e:35907 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T18:53:00,004 DEBUG [9ec37ae3355e:35907 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T18:53:00,004 DEBUG [9ec37ae3355e:35907 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T18:53:00,004 INFO [9ec37ae3355e:35907 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T18:53:00,004 INFO [9ec37ae3355e:35907 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T18:53:00,004 INFO [9ec37ae3355e:35907 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T18:53:00,005 DEBUG [9ec37ae3355e:35907 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T18:53:00,012 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9ec37ae3355e,40531,1733079178442 2024-12-01T18:53:00,020 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9ec37ae3355e,40531,1733079178442, state=OPENING 2024-12-01T18:53:00,025 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create 
it 2024-12-01T18:53:00,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:00,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:00,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:00,027 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:00,028 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:00,028 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:00,028 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:00,028 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:00,029 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T18:53:00,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9ec37ae3355e,40531,1733079178442}] 2024-12-01T18:53:00,206 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T18:53:00,209 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40443, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T18:53:00,220 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-01T18:53:00,221 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-01T18:53:00,221 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-01T18:53:00,228 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9ec37ae3355e%2C40531%2C1733079178442.meta, suffix=.meta, logDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,40531,1733079178442, 
archiveDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/oldWALs, maxLogs=32 2024-12-01T18:53:00,243 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,40531,1733079178442/9ec37ae3355e%2C40531%2C1733079178442.meta.1733079180229.meta, exclude list is [], retry=0 2024-12-01T18:53:00,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46831,DS-1348173a-e9d0-4d8c-ba63-313fe29dc5a3,DISK] 2024-12-01T18:53:00,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33515,DS-6f54506a-1cb6-48e6-af67-e3a4d0391ff5,DISK] 2024-12-01T18:53:00,247 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:46223,DS-b6d017bb-2401-4aaf-b99b-1290247b2597,DISK] 2024-12-01T18:53:00,251 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/WALs/9ec37ae3355e,40531,1733079178442/9ec37ae3355e%2C40531%2C1733079178442.meta.1733079180229.meta 2024-12-01T18:53:00,251 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:46233:46233),(127.0.0.1/127.0.0.1:41183:41183),(127.0.0.1/127.0.0.1:45547:45547)] 2024-12-01T18:53:00,251 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:53:00,253 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T18:53:00,256 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T18:53:00,260 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
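The "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" lines above, and the AsyncFSWALProvider chosen for the hbase:meta WAL, are driven by a handful of settings. The sketch below reads the keys these values are usually derived from; the key names and the rollsize = blocksize * multiplier relationship are assumptions based on the stock HBase configuration, not something taken from this test's own files, and the defaults shown are simply the values observed in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names (see above). In this run: 256 MB * 0.5 = 128 MB rollsize, 32 logs, asyncfs provider.
        long blockSize   = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        int maxLogs      = conf.getInt("hbase.regionserver.maxlogs", 32);
        String provider  = conf.get("hbase.wal.provider", "asyncfs"); // AsyncFSWALProvider here
        System.out.printf("blocksize=%d rollsize=%d maxLogs=%d provider=%s%n",
            blockSize, (long) (blockSize * multiplier), maxLogs, provider);
      }
    }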
2024-12-01T18:53:00,265 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T18:53:00,265 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:53:00,265 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-01T18:53:00,266 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-01T18:53:00,269 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:53:00,270 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:53:00,270 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:00,271 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:53:00,271 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T18:53:00,272 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T18:53:00,273 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:00,273 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:53:00,273 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:53:00,275 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:53:00,275 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:00,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:53:00,276 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:53:00,277 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:53:00,277 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:00,278 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
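Each of the four hbase:meta column families opened above (info, ns, rep_barrier, table) prints the same CompactionConfiguration line: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with jitter 0.5. Those numbers normally map to the configuration keys read in the sketch below; the key names are the standard hbase-site settings and are listed only to connect the logged values back to configuration, with the defaults shown being the values in this log rather than authoritative ones.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public final class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Defaults below mirror the CompactionConfiguration lines printed for region 1588230740.
        int minFiles      = conf.getInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
        int maxFiles      = conf.getInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
        float ratio       = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);   // selection ratio
        float offPeak     = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        long majorPeriod  = conf.getLong("hbase.hregion.majorcompaction", 604800000L);      // 7 days in ms
        float majorJitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        System.out.printf("min=%d max=%d ratio=%.1f offPeak=%.1f major=%dms jitter=%.1f%n",
            minFiles, maxFiles, ratio, offPeak, majorPeriod, majorJitter);
      }
    }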
2024-12-01T18:53:00,278 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T18:53:00,279 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740 2024-12-01T18:53:00,281 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740 2024-12-01T18:53:00,284 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T18:53:00,284 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T18:53:00,285 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T18:53:00,287 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T18:53:00,289 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60574994, jitterRate=-0.09736225008964539}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:53:00,289 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-01T18:53:00,290 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733079180266Writing region info on filesystem at 1733079180266Initializing all the Stores at 1733079180268 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079180268Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079180268Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079180268Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079180268Cleaning up temporary data from old regions at 1733079180284 (+16 ms)Running coprocessor post-open hooks at 1733079180289 (+5 ms)Region opened successfully at 1733079180290 (+1 ms) 2024-12-01T18:53:00,297 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733079180197 2024-12-01T18:53:00,308 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T18:53:00,309 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-01T18:53:00,310 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9ec37ae3355e,40531,1733079178442 2024-12-01T18:53:00,312 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9ec37ae3355e,40531,1733079178442, state=OPEN 2024-12-01T18:53:00,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:53:00,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:53:00,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:53:00,316 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:53:00,316 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:00,316 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:00,316 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:00,316 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:00,317 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, 
hasLock=true; OpenRegionProcedure 1588230740, server=9ec37ae3355e,40531,1733079178442 2024-12-01T18:53:00,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T18:53:00,323 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9ec37ae3355e,40531,1733079178442 in 286 msec 2024-12-01T18:53:00,329 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T18:53:00,329 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 753 msec 2024-12-01T18:53:00,331 DEBUG [PEWorker-5 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:53:00,331 INFO [PEWorker-5 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-01T18:53:00,350 DEBUG [PEWorker-5 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T18:53:00,352 DEBUG [PEWorker-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9ec37ae3355e,40531,1733079178442, seqNum=-1] 2024-12-01T18:53:00,372 DEBUG [PEWorker-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:53:00,375 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53741, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:53:00,392 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0510 sec 2024-12-01T18:53:00,392 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733079180392, completionTime=-1 2024-12-01T18:53:00,394 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-01T18:53:00,395 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
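Once pid=2 completes and the location is published under /hbase/meta-region-server, clients resolve hbase:meta the same way PEWorker-5 does above ("Start fetching meta region location from registry" followed by "The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9ec37ae3355e,40531,1733079178442, seqNum=-1]"). A client-side equivalent using the public API is sketched below; the quorum value is the one visible in this log and everything else is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class MetaLocationSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set(HConstants.ZOOKEEPER_QUORUM, "127.0.0.1:53882"); // quorum printed by ZKWatcher above
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          // Expected to report hostname=9ec37ae3355e,40531,1733079178442 for this run.
          System.out.println(loc);
        }
      }
    }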
2024-12-01T18:53:00,419 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-01T18:53:00,419 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733079240419 2024-12-01T18:53:00,420 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733079300420 2024-12-01T18:53:00,420 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 25 msec 2024-12-01T18:53:00,421 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-01T18:53:00,427 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,35907,1733079177647-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:00,427 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,35907,1733079177647-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:00,428 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,35907,1733079177647-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:00,429 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9ec37ae3355e:35907, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:00,429 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:00,430 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:00,435 DEBUG [master/9ec37ae3355e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-01T18:53:00,456 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.896sec 2024-12-01T18:53:00,457 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T18:53:00,458 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T18:53:00,459 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T18:53:00,460 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-01T18:53:00,460 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T18:53:00,460 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,35907,1733079177647-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:53:00,461 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,35907,1733079177647-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T18:53:00,466 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-01T18:53:00,466 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T18:53:00,467 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,35907,1733079177647-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:00,518 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54b83ce1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:53:00,522 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-01T18:53:00,522 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-01T18:53:00,526 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9ec37ae3355e,35907,-1 for getting cluster id 2024-12-01T18:53:00,528 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T18:53:00,535 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '613ede39-e2e7-4ea9-abf9-7a7778fbdd41' 2024-12-01T18:53:00,537 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T18:53:00,537 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "613ede39-e2e7-4ea9-abf9-7a7778fbdd41" 2024-12-01T18:53:00,539 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28a5a4c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:53:00,539 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9ec37ae3355e,35907,-1] 2024-12-01T18:53:00,542 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T18:53:00,543 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:00,545 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56810, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
2024-12-01T18:53:00,547 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50fba94c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:53:00,548 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T18:53:00,555 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9ec37ae3355e,40531,1733079178442, seqNum=-1] 2024-12-01T18:53:00,555 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:53:00,557 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49616, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:53:00,577 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9ec37ae3355e,35907,1733079177647 2024-12-01T18:53:00,581 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T18:53:00,585 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 9ec37ae3355e,35907,1733079177647 2024-12-01T18:53:00,587 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4db4b8aa 2024-12-01T18:53:00,587 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T18:53:00,589 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56822, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T18:53:00,594 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:53:00,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-01T18:53:00,604 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:53:00,606 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-01T18:53:00,606 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:00,608 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:53:00,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T18:53:00,616 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:00,616 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:00,622 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:47170 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:46223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47170 dst: /127.0.0.1:46223 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:53:00,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-01T18:53:00,628 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
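The two "Cannot allocate parity block" warnings and the DataXceiver "Premature EOF" error above are consistent with the cluster topology: the test data directory sits on an RS-3-2-1024k erasure-coded path, which needs 3 data plus 2 parity, i.e. at least 5 datanodes, to place a full block group, while this mini DFS cluster has only three (127.0.0.1:33515, :46223, :46831). The log itself points at 'hdfs ec -verifyClusterSetup' for checking that. A small programmatic check, under the assumption that the test directory carries that policy, is sketched below; the path is shortened for illustration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public final class EcPolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address taken from the WAL paths above (hdfs://localhost:33839).
        DistributedFileSystem dfs =
            (DistributedFileSystem) new Path("hdfs://localhost:33839/").getFileSystem(conf);
        Path dir = new Path("/user/jenkins/test-data"); // illustrative; the real test dir is longer
        ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
        if (policy != null) {
          // RS-3-2-1024k: numDataUnits=3, numParityUnits=2, so a full block group needs 5 datanodes.
          System.out.println(policy.getName() + " data=" + policy.getNumDataUnits()
              + " parity=" + policy.getNumParityUnits());
        }
        dfs.close();
      }
    }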
2024-12-01T18:53:00,630 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 26c28ed48705b26d77a7e3cac0a7ae69, NAME => 'TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a 2024-12-01T18:53:00,635 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:00,635 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:00,638 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:47192 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:46223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47192 dst: /127.0.0.1:46223 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:53:00,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-01T18:53:00,643 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-01T18:53:00,643 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:53:00,643 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 26c28ed48705b26d77a7e3cac0a7ae69, disabling compactions & flushes 2024-12-01T18:53:00,644 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:00,644 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:00,644 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. after waiting 0 ms 2024-12-01T18:53:00,644 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:00,644 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:00,644 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 26c28ed48705b26d77a7e3cac0a7ae69: Waiting for close lock at 1733079180643Disabling compacts and flushes for region at 1733079180643Disabling writes for close at 1733079180644 (+1 ms)Writing region close event to WAL at 1733079180644Closed at 1733079180644 2024-12-01T18:53:00,647 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:53:00,653 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733079180647"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733079180647"}]},"ts":"1733079180647"} 2024-12-01T18:53:00,658 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-01T18:53:00,660 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:53:00,663 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733079180660"}]},"ts":"1733079180660"} 2024-12-01T18:53:00,667 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-01T18:53:00,668 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {9ec37ae3355e=0} racks are {/default-rack=0} 2024-12-01T18:53:00,669 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T18:53:00,669 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T18:53:00,669 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T18:53:00,669 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T18:53:00,669 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T18:53:00,669 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T18:53:00,669 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T18:53:00,669 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T18:53:00,669 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T18:53:00,669 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T18:53:00,671 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=26c28ed48705b26d77a7e3cac0a7ae69, ASSIGN}] 2024-12-01T18:53:00,673 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=26c28ed48705b26d77a7e3cac0a7ae69, ASSIGN 2024-12-01T18:53:00,675 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=26c28ed48705b26d77a7e3cac0a7ae69, ASSIGN; state=OFFLINE, location=9ec37ae3355e,39281,1733079178336; forceNewPlan=false, retain=false 2024-12-01T18:53:00,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T18:53:00,828 INFO [9ec37ae3355e:35907 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-01T18:53:00,828 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=26c28ed48705b26d77a7e3cac0a7ae69, regionState=OPENING, regionLocation=9ec37ae3355e,39281,1733079178336 2024-12-01T18:53:00,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=26c28ed48705b26d77a7e3cac0a7ae69, ASSIGN because future has completed 2024-12-01T18:53:00,834 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 26c28ed48705b26d77a7e3cac0a7ae69, server=9ec37ae3355e,39281,1733079178336}] 2024-12-01T18:53:00,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T18:53:00,989 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T18:53:00,991 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49281, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T18:53:00,998 INFO [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:00,998 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 26c28ed48705b26d77a7e3cac0a7ae69, NAME => 'TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:53:00,998 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:00,998 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:53:00,998 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:00,998 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:01,001 INFO [StoreOpener-26c28ed48705b26d77a7e3cac0a7ae69-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:01,003 INFO [StoreOpener-26c28ed48705b26d77a7e3cac0a7ae69-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 26c28ed48705b26d77a7e3cac0a7ae69 columnFamilyName cf 2024-12-01T18:53:01,003 DEBUG [StoreOpener-26c28ed48705b26d77a7e3cac0a7ae69-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:01,004 INFO [StoreOpener-26c28ed48705b26d77a7e3cac0a7ae69-1 {}] regionserver.HStore(327): Store=26c28ed48705b26d77a7e3cac0a7ae69/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:53:01,004 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:01,005 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/default/TestHBaseWalOnEC/26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:01,006 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/default/TestHBaseWalOnEC/26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:01,006 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:01,007 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:01,010 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:01,016 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/default/TestHBaseWalOnEC/26c28ed48705b26d77a7e3cac0a7ae69/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:53:01,017 INFO [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 26c28ed48705b26d77a7e3cac0a7ae69; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69694944, jitterRate=0.03853559494018555}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:53:01,017 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:01,018 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 26c28ed48705b26d77a7e3cac0a7ae69: Running coprocessor pre-open hook at 1733079180999Writing region info on filesystem at 1733079180999Initializing all the Stores at 1733079181000 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079181000Cleaning up temporary data from old regions at 1733079181007 (+7 ms)Running coprocessor post-open hooks at 1733079181017 (+10 ms)Region opened successfully at 1733079181018 (+1 ms) 2024-12-01T18:53:01,020 INFO [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69., pid=6, masterSystemTime=1733079180988 2024-12-01T18:53:01,024 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:01,024 INFO [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:01,025 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=26c28ed48705b26d77a7e3cac0a7ae69, regionState=OPEN, openSeqNum=2, regionLocation=9ec37ae3355e,39281,1733079178336 2024-12-01T18:53:01,029 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 26c28ed48705b26d77a7e3cac0a7ae69, server=9ec37ae3355e,39281,1733079178336 because future has completed 2024-12-01T18:53:01,035 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T18:53:01,035 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 26c28ed48705b26d77a7e3cac0a7ae69, server=9ec37ae3355e,39281,1733079178336 in 197 msec 2024-12-01T18:53:01,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T18:53:01,039 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=26c28ed48705b26d77a7e3cac0a7ae69, ASSIGN in 364 msec 2024-12-01T18:53:01,040 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:53:01,040 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733079181040"}]},"ts":"1733079181040"} 2024-12-01T18:53:01,043 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-01T18:53:01,045 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:53:01,047 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 448 msec 2024-12-01T18:53:01,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T18:53:01,243 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T18:53:01,243 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-01T18:53:01,245 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T18:53:01,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-01T18:53:01,251 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T18:53:01,252 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-01T18:53:01,260 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69., hostname=9ec37ae3355e,39281,1733079178336, seqNum=2] 2024-12-01T18:53:01,262 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:53:01,264 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46166, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:53:01,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-01T18:53:01,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-01T18:53:01,282 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-01T18:53:01,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T18:53:01,283 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-01T18:53:01,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-01T18:53:01,392 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T18:53:01,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39281 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-01T18:53:01,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:01,451 INFO [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 26c28ed48705b26d77a7e3cac0a7ae69 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-01T18:53:01,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/default/TestHBaseWalOnEC/26c28ed48705b26d77a7e3cac0a7ae69/.tmp/cf/b7cd65588be64c8c879d67424e34f640 is 36, key is row/cf:cq/1733079181265/Put/seqid=0 2024-12-01T18:53:01,516 WARN [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:01,517 WARN [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:01,521 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_479716352_22 at /127.0.0.1:57992 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57992 dst: /127.0.0.1:33515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:53:01,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-01T18:53:01,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T18:53:01,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T18:53:01,930 WARN [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T18:53:01,930 INFO [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/default/TestHBaseWalOnEC/26c28ed48705b26d77a7e3cac0a7ae69/.tmp/cf/b7cd65588be64c8c879d67424e34f640 2024-12-01T18:53:01,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/default/TestHBaseWalOnEC/26c28ed48705b26d77a7e3cac0a7ae69/.tmp/cf/b7cd65588be64c8c879d67424e34f640 as hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/default/TestHBaseWalOnEC/26c28ed48705b26d77a7e3cac0a7ae69/cf/b7cd65588be64c8c879d67424e34f640 2024-12-01T18:53:01,992 INFO [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/default/TestHBaseWalOnEC/26c28ed48705b26d77a7e3cac0a7ae69/cf/b7cd65588be64c8c879d67424e34f640, entries=1, sequenceid=5, filesize=4.7 K 2024-12-01T18:53:02,000 INFO [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 26c28ed48705b26d77a7e3cac0a7ae69 in 547ms, sequenceid=5, compaction requested=false 2024-12-01T18:53:02,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-01T18:53:02,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 26c28ed48705b26d77a7e3cac0a7ae69: 2024-12-01T18:53:02,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 
2024-12-01T18:53:02,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-01T18:53:02,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-01T18:53:02,017 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-01T18:53:02,017 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 728 msec 2024-12-01T18:53:02,021 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 743 msec 2024-12-01T18:53:02,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-01T18:53:02,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-01T18:53:02,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-01T18:53:02,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-01T18:53:02,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-01T18:53:02,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-01T18:53:02,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35907 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T18:53:02,423 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T18:53:02,437 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-01T18:53:02,438 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-01T18:53:02,438 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T18:53:02,442 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-12-01T18:53:02,443 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:02,443 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T18:53:02,443 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T18:53:02,443 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=806158981, stopped=false 2024-12-01T18:53:02,444 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9ec37ae3355e,35907,1733079177647 2024-12-01T18:53:02,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:53:02,445 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:53:02,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:02,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:53:02,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:53:02,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:02,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:02,446 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T18:53:02,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:02,447 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:02,447 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:02,447 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:02,447 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43445-0x1016f5f814c0003, 
quorum=127.0.0.1:53882, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:02,447 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T18:53:02,447 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at 
org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T18:53:02,447 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:02,448 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9ec37ae3355e,39281,1733079178336' ***** 2024-12-01T18:53:02,448 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T18:53:02,448 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9ec37ae3355e,40531,1733079178442' ***** 2024-12-01T18:53:02,448 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T18:53:02,448 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9ec37ae3355e,43445,1733079178483' ***** 2024-12-01T18:53:02,448 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T18:53:02,448 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:53:02,448 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:53:02,449 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:53:02,449 INFO [RS:0;9ec37ae3355e:39281 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T18:53:02,449 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T18:53:02,449 INFO [RS:2;9ec37ae3355e:43445 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T18:53:02,449 INFO [RS:1;9ec37ae3355e:40531 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T18:53:02,449 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T18:53:02,449 INFO [RS:2;9ec37ae3355e:43445 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T18:53:02,449 INFO [RS:0;9ec37ae3355e:39281 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T18:53:02,449 INFO [RS:1;9ec37ae3355e:40531 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-01T18:53:02,449 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(959): stopping server 9ec37ae3355e,43445,1733079178483 2024-12-01T18:53:02,449 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(959): stopping server 9ec37ae3355e,40531,1733079178442 2024-12-01T18:53:02,449 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T18:53:02,449 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T18:53:02,449 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(3091): Received CLOSE for 26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:02,449 INFO [RS:2;9ec37ae3355e:43445 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;9ec37ae3355e:43445. 2024-12-01T18:53:02,449 INFO [RS:1;9ec37ae3355e:40531 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;9ec37ae3355e:40531. 2024-12-01T18:53:02,449 DEBUG [RS:2;9ec37ae3355e:43445 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T18:53:02,449 DEBUG [RS:1;9ec37ae3355e:40531 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at 
org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T18:53:02,449 DEBUG [RS:2;9ec37ae3355e:43445 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:02,449 DEBUG [RS:1;9ec37ae3355e:40531 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:02,450 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:53:02,450 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T18:53:02,450 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:53:02,450 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T18:53:02,450 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(959): stopping server 9ec37ae3355e,39281,1733079178336 2024-12-01T18:53:02,450 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(976): stopping server 9ec37ae3355e,43445,1733079178483; all regions closed. 2024-12-01T18:53:02,450 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-01T18:53:02,450 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T18:53:02,450 INFO [RS:0;9ec37ae3355e:39281 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9ec37ae3355e:39281. 2024-12-01T18:53:02,450 DEBUG [RS:0;9ec37ae3355e:39281 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T18:53:02,450 DEBUG [RS:0;9ec37ae3355e:39281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:02,450 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-01T18:53:02,450 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-01T18:53:02,450 DEBUG [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(1325): Online 
Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-01T18:53:02,450 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:53:02,450 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 26c28ed48705b26d77a7e3cac0a7ae69, disabling compactions & flushes 2024-12-01T18:53:02,450 DEBUG [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(1325): Online Regions={26c28ed48705b26d77a7e3cac0a7ae69=TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69.} 2024-12-01T18:53:02,450 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T18:53:02,450 INFO [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:02,450 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T18:53:02,451 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:02,451 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:53:02,451 DEBUG [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(1351): Waiting on 26c28ed48705b26d77a7e3cac0a7ae69 2024-12-01T18:53:02,451 DEBUG [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-01T18:53:02,451 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. after waiting 0 ms 2024-12-01T18:53:02,451 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:53:02,451 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 
2024-12-01T18:53:02,451 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-01T18:53:02,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_1073741826_1016 (size=93) 2024-12-01T18:53:02,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_1073741826_1016 (size=93) 2024-12-01T18:53:02,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_1073741826_1016 (size=93) 2024-12-01T18:53:02,462 DEBUG [RS:2;9ec37ae3355e:43445 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/oldWALs 2024-12-01T18:53:02,462 INFO [RS:2;9ec37ae3355e:43445 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9ec37ae3355e%2C43445%2C1733079178483:(num 1733079179791) 2024-12-01T18:53:02,462 DEBUG [RS:2;9ec37ae3355e:43445 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:02,462 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:02,462 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T18:53:02,462 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.ChoreService(370): Chore service for: regionserver/9ec37ae3355e:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T18:53:02,463 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:53:02,463 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:53:02,463 INFO [regionserver/9ec37ae3355e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T18:53:02,463 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-01T18:53:02,463 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T18:53:02,463 INFO [RS:2;9ec37ae3355e:43445 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43445 2024-12-01T18:53:02,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9ec37ae3355e,43445,1733079178483 2024-12-01T18:53:02,467 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T18:53:02,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:53:02,469 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9ec37ae3355e,43445,1733079178483] 2024-12-01T18:53:02,471 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/default/TestHBaseWalOnEC/26c28ed48705b26d77a7e3cac0a7ae69/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T18:53:02,472 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9ec37ae3355e,43445,1733079178483 already deleted, retry=false 2024-12-01T18:53:02,472 INFO [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:02,472 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 26c28ed48705b26d77a7e3cac0a7ae69: Waiting for close lock at 1733079182450Running coprocessor pre-close hooks at 1733079182450Disabling compacts and flushes for region at 1733079182450Disabling writes for close at 1733079182451 (+1 ms)Writing region close event to WAL at 1733079182452 (+1 ms)Running coprocessor post-close hooks at 1733079182472 (+20 ms)Closed at 1733079182472 2024-12-01T18:53:02,472 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9ec37ae3355e,43445,1733079178483 expired; onlineServers=2 2024-12-01T18:53:02,473 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69. 2024-12-01T18:53:02,481 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/.tmp/info/5e4041ea6b584632b461ff35320aa7f5 is 153, key is TestHBaseWalOnEC,,1733079180591.26c28ed48705b26d77a7e3cac0a7ae69./info:regioninfo/1733079181025/Put/seqid=0 2024-12-01T18:53:02,484 WARN [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-01T18:53:02,485 WARN [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:02,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_-9223372036854775708_1013 (size=1321) 2024-12-01T18:53:02,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_-9223372036854775709_1013 (size=1321) 2024-12-01T18:53:02,489 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-478134681_22 at /127.0.0.1:40324 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:46831:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40324 dst: /127.0.0.1:46831 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:53:02,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-01T18:53:02,494 WARN [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
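
The pair of "Cannot allocate parity block" warnings above, followed by "Block group <1> failed to write 2 blocks", is consistent with the geometry of this run: the RS-3-2-1024k policy wants 3 data + 2 parity = 5 block placements per group, HDFS normally places at most one block of a group per datanode, and the mini-cluster here exposes only three datanodes (127.0.0.1:46223, 127.0.0.1:33515, 127.0.0.1:46831). A minimal arithmetic sketch of that shortfall follows; it is illustrative only, not part of the test code, and the class name is made up for the example.

    // Illustrative sketch only: why an RS-3-2 write on a 3-datanode mini-cluster
    // logs exactly two "Cannot allocate parity block" warnings (indices 3 and 4).
    public class EcParityShortfallSketch {  // hypothetical class, not from the test
        public static void main(String[] args) {
            int dataUnits = 3;        // "RS-3-2": three data blocks per block group
            int parityUnits = 2;      // ...and two parity blocks
            int liveDatanodes = 3;    // this run has three datanodes
            int totalUnits = dataUnits + parityUnits;                   // 5 placements wanted
            int unplaceable = Math.max(0, totalUnits - liveDatanodes);  // 2 blocks left over
            for (int index = totalUnits - unplaceable; index < totalUnits; index++) {
                System.out.println("Cannot place block index " + index); // prints 3, then 4
            }
        }
    }
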
2024-12-01T18:53:02,495 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/.tmp/info/5e4041ea6b584632b461ff35320aa7f5 2024-12-01T18:53:02,523 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/.tmp/ns/476e4664ac63471c92c1ad6937607140 is 43, key is default/ns:d/1733079180379/Put/seqid=0 2024-12-01T18:53:02,525 WARN [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:02,525 WARN [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:02,529 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-478134681_22 at /127.0.0.1:58036 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:33515:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58036 dst: /127.0.0.1:33515 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-01T18:53:02,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-01T18:53:02,533 INFO [regionserver/9ec37ae3355e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:02,533 INFO [regionserver/9ec37ae3355e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:02,534 WARN [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T18:53:02,534 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/.tmp/ns/476e4664ac63471c92c1ad6937607140 2024-12-01T18:53:02,534 INFO [regionserver/9ec37ae3355e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:02,559 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/.tmp/table/2ce2dc0d4d0541f5a9ee8a0b8dad0ab5 is 52, key is TestHBaseWalOnEC/table:state/1733079181040/Put/seqid=0 2024-12-01T18:53:02,561 WARN [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:02,561 WARN [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:02,564 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-478134681_22 at /127.0.0.1:58710 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:46223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58710 dst: /127.0.0.1:46223 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:53:02,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-01T18:53:02,569 WARN [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T18:53:02,569 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/.tmp/table/2ce2dc0d4d0541f5a9ee8a0b8dad0ab5 2024-12-01T18:53:02,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:02,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43445-0x1016f5f814c0003, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:02,570 INFO [RS:2;9ec37ae3355e:43445 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T18:53:02,570 INFO [RS:2;9ec37ae3355e:43445 {}] regionserver.HRegionServer(1031): Exiting; stopping=9ec37ae3355e,43445,1733079178483; zookeeper connection closed. 
2024-12-01T18:53:02,571 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5bcbaa91 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5bcbaa91 2024-12-01T18:53:02,579 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/.tmp/info/5e4041ea6b584632b461ff35320aa7f5 as hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/info/5e4041ea6b584632b461ff35320aa7f5 2024-12-01T18:53:02,589 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/info/5e4041ea6b584632b461ff35320aa7f5, entries=10, sequenceid=11, filesize=6.5 K 2024-12-01T18:53:02,590 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/.tmp/ns/476e4664ac63471c92c1ad6937607140 as hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/ns/476e4664ac63471c92c1ad6937607140 2024-12-01T18:53:02,599 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/ns/476e4664ac63471c92c1ad6937607140, entries=2, sequenceid=11, filesize=5.0 K 2024-12-01T18:53:02,600 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/.tmp/table/2ce2dc0d4d0541f5a9ee8a0b8dad0ab5 as hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/table/2ce2dc0d4d0541f5a9ee8a0b8dad0ab5 2024-12-01T18:53:02,609 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/table/2ce2dc0d4d0541f5a9ee8a0b8dad0ab5, entries=2, sequenceid=11, filesize=5.1 K 2024-12-01T18:53:02,610 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 159ms, sequenceid=11, compaction requested=false 2024-12-01T18:53:02,610 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-01T18:53:02,620 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-01T18:53:02,622 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] 
coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T18:53:02,622 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T18:53:02,622 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733079182450Running coprocessor pre-close hooks at 1733079182450Disabling compacts and flushes for region at 1733079182450Disabling writes for close at 1733079182451 (+1 ms)Obtaining lock to block concurrent updates at 1733079182451Preparing flush snapshotting stores in 1588230740 at 1733079182451Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733079182452 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733079182453 (+1 ms)Flushing 1588230740/info: creating writer at 1733079182453Flushing 1588230740/info: appending metadata at 1733079182478 (+25 ms)Flushing 1588230740/info: closing flushed file at 1733079182478Flushing 1588230740/ns: creating writer at 1733079182505 (+27 ms)Flushing 1588230740/ns: appending metadata at 1733079182522 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1733079182522Flushing 1588230740/table: creating writer at 1733079182543 (+21 ms)Flushing 1588230740/table: appending metadata at 1733079182558 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733079182558Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bb480e8: reopening flushed file at 1733079182578 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@650b3b77: reopening flushed file at 1733079182589 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49de5e73: reopening flushed file at 1733079182599 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 159ms, sequenceid=11, compaction requested=false at 1733079182610 (+11 ms)Writing region close event to WAL at 1733079182613 (+3 ms)Running coprocessor post-close hooks at 1733079182621 (+8 ms)Closed at 1733079182622 (+1 ms) 2024-12-01T18:53:02,622 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T18:53:02,631 INFO [regionserver/9ec37ae3355e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-01T18:53:02,631 INFO [regionserver/9ec37ae3355e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-01T18:53:02,631 INFO [regionserver/9ec37ae3355e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-01T18:53:02,632 INFO [regionserver/9ec37ae3355e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-01T18:53:02,651 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(976): stopping server 9ec37ae3355e,40531,1733079178442; all regions closed. 2024-12-01T18:53:02,651 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(976): stopping server 9ec37ae3355e,39281,1733079178336; all regions closed. 
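
For reference, the rounded sizes in the flush summary above ("dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152") are the same numbers printed twice, once in bytes and once rounded to two decimals at 1 KB = 1024 bytes. A quick check, illustrative only and with a made-up class name:

    // Illustrative check of the "~1.34 KB/1377" and "~3.08 KB/3152" pairs in the flush summary.
    public class FlushSizeCheck {  // hypothetical class, not from the test
        public static void main(String[] args) {
            long dataBytes = 1377;
            long heapBytes = 3152;
            System.out.printf("dataSize ~%.2f KB/%d%n", dataBytes / 1024.0, dataBytes); // ~1.34 KB/1377
            System.out.printf("heapSize ~%.2f KB/%d%n", heapBytes / 1024.0, heapBytes); // ~3.08 KB/3152
        }
    }
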
2024-12-01T18:53:02,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_1073741827_1017 (size=1298) 2024-12-01T18:53:02,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_1073741829_1019 (size=2751) 2024-12-01T18:53:02,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_1073741829_1019 (size=2751) 2024-12-01T18:53:02,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_1073741827_1017 (size=1298) 2024-12-01T18:53:02,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_1073741829_1019 (size=2751) 2024-12-01T18:53:02,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_1073741827_1017 (size=1298) 2024-12-01T18:53:02,660 DEBUG [RS:0;9ec37ae3355e:39281 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/oldWALs 2024-12-01T18:53:02,660 INFO [RS:0;9ec37ae3355e:39281 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9ec37ae3355e%2C39281%2C1733079178336:(num 1733079179791) 2024-12-01T18:53:02,660 DEBUG [RS:0;9ec37ae3355e:39281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:02,660 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:02,660 DEBUG [RS:1;9ec37ae3355e:40531 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/oldWALs 2024-12-01T18:53:02,660 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T18:53:02,660 INFO [RS:1;9ec37ae3355e:40531 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9ec37ae3355e%2C40531%2C1733079178442.meta:.meta(num 1733079180229) 2024-12-01T18:53:02,661 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.ChoreService(370): Chore service for: regionserver/9ec37ae3355e:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T18:53:02,661 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:53:02,661 INFO [regionserver/9ec37ae3355e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T18:53:02,661 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:53:02,661 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-01T18:53:02,661 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T18:53:02,661 INFO [RS:0;9ec37ae3355e:39281 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39281 2024-12-01T18:53:02,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9ec37ae3355e,39281,1733079178336 2024-12-01T18:53:02,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:53:02,664 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T18:53:02,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_1073741828_1018 (size=93) 2024-12-01T18:53:02,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_1073741828_1018 (size=93) 2024-12-01T18:53:02,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_1073741828_1018 (size=93) 2024-12-01T18:53:02,665 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9ec37ae3355e,39281,1733079178336] 2024-12-01T18:53:02,667 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9ec37ae3355e,39281,1733079178336 already deleted, retry=false 2024-12-01T18:53:02,667 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9ec37ae3355e,39281,1733079178336 expired; onlineServers=1 2024-12-01T18:53:02,667 DEBUG [RS:1;9ec37ae3355e:40531 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/oldWALs 2024-12-01T18:53:02,667 INFO [RS:1;9ec37ae3355e:40531 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 9ec37ae3355e%2C40531%2C1733079178442:(num 1733079179791) 2024-12-01T18:53:02,667 DEBUG [RS:1;9ec37ae3355e:40531 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:02,667 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:02,667 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T18:53:02,668 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.ChoreService(370): Chore service for: regionserver/9ec37ae3355e:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T18:53:02,668 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T18:53:02,668 INFO [regionserver/9ec37ae3355e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-01T18:53:02,668 INFO [RS:1;9ec37ae3355e:40531 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40531 2024-12-01T18:53:02,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:53:02,672 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9ec37ae3355e,40531,1733079178442 2024-12-01T18:53:02,672 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T18:53:02,672 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9ec37ae3355e,40531,1733079178442] 2024-12-01T18:53:02,674 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9ec37ae3355e,40531,1733079178442 already deleted, retry=false 2024-12-01T18:53:02,674 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9ec37ae3355e,40531,1733079178442 expired; onlineServers=0 2024-12-01T18:53:02,674 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9ec37ae3355e,35907,1733079177647' ***** 2024-12-01T18:53:02,674 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T18:53:02,674 INFO [M:0;9ec37ae3355e:35907 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T18:53:02,674 INFO [M:0;9ec37ae3355e:35907 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T18:53:02,674 DEBUG [M:0;9ec37ae3355e:35907 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T18:53:02,675 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-01T18:53:02,675 DEBUG [M:0;9ec37ae3355e:35907 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T18:53:02,675 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.small.0-1733079179458 {}] cleaner.HFileCleaner(306): Exit Thread[master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.small.0-1733079179458,5,FailOnTimeoutGroup] 2024-12-01T18:53:02,675 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.large.0-1733079179457 {}] cleaner.HFileCleaner(306): Exit Thread[master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.large.0-1733079179457,5,FailOnTimeoutGroup] 2024-12-01T18:53:02,675 INFO [M:0;9ec37ae3355e:35907 {}] hbase.ChoreService(370): Chore service for: master/9ec37ae3355e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-01T18:53:02,675 INFO [M:0;9ec37ae3355e:35907 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T18:53:02,675 DEBUG [M:0;9ec37ae3355e:35907 {}] master.HMaster(1795): Stopping service threads 2024-12-01T18:53:02,675 INFO [M:0;9ec37ae3355e:35907 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T18:53:02,675 INFO [M:0;9ec37ae3355e:35907 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T18:53:02,676 INFO [M:0;9ec37ae3355e:35907 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T18:53:02,676 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-01T18:53:02,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T18:53:02,676 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:02,677 DEBUG [M:0;9ec37ae3355e:35907 {}] zookeeper.ZKUtil(347): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T18:53:02,677 WARN [M:0;9ec37ae3355e:35907 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T18:53:02,677 INFO [M:0;9ec37ae3355e:35907 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/.lastflushedseqids 2024-12-01T18:53:02,686 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:02,686 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-01T18:53:02,689 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:58728 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:46223:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58728 dst: /127.0.0.1:46223 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:53:02,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-01T18:53:02,693 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-01T18:53:02,693 INFO [M:0;9ec37ae3355e:35907 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-01T18:53:02,693 INFO [M:0;9ec37ae3355e:35907 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T18:53:02,694 DEBUG [M:0;9ec37ae3355e:35907 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:53:02,694 INFO [M:0;9ec37ae3355e:35907 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:53:02,694 DEBUG [M:0;9ec37ae3355e:35907 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:53:02,694 DEBUG [M:0;9ec37ae3355e:35907 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:53:02,694 DEBUG [M:0;9ec37ae3355e:35907 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T18:53:02,694 INFO [M:0;9ec37ae3355e:35907 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-12-01T18:53:02,712 DEBUG [M:0;9ec37ae3355e:35907 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/56e5afdf9701465e9017e01662276e08 is 82, key is hbase:meta,,1/info:regioninfo/1733079180310/Put/seqid=0 2024-12-01T18:53:02,715 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:02,715 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:02,718 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:40346 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:46831:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40346 dst: /127.0.0.1:46831 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:53:02,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-01T18:53:02,723 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-01T18:53:02,723 INFO [M:0;9ec37ae3355e:35907 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/56e5afdf9701465e9017e01662276e08 2024-12-01T18:53:02,749 DEBUG [M:0;9ec37ae3355e:35907 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/76314e0a3bdd49c6984911802cd877e2 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733079181046/Put/seqid=0 2024-12-01T18:53:02,751 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:02,751 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:02,754 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:40364 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:46831:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40364 dst: /127.0.0.1:46831 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:53:02,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_-9223372036854775552_1037 (size=6440) 2024-12-01T18:53:02,759 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-01T18:53:02,760 INFO [M:0;9ec37ae3355e:35907 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/76314e0a3bdd49c6984911802cd877e2 2024-12-01T18:53:02,766 INFO [RS:0;9ec37ae3355e:39281 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T18:53:02,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:02,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39281-0x1016f5f814c0001, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:02,766 INFO [RS:0;9ec37ae3355e:39281 {}] regionserver.HRegionServer(1031): Exiting; stopping=9ec37ae3355e,39281,1733079178336; zookeeper connection closed. 2024-12-01T18:53:02,767 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@18238714 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@18238714 2024-12-01T18:53:02,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:02,773 INFO [RS:1;9ec37ae3355e:40531 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T18:53:02,773 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40531-0x1016f5f814c0002, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:02,773 INFO [RS:1;9ec37ae3355e:40531 {}] regionserver.HRegionServer(1031): Exiting; stopping=9ec37ae3355e,40531,1733079178442; zookeeper connection closed. 2024-12-01T18:53:02,774 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4dbc485f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4dbc485f 2024-12-01T18:53:02,774 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-01T18:53:02,784 DEBUG [M:0;9ec37ae3355e:35907 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf24cf628d4f4f4fa2104dcffe252b74 is 69, key is 9ec37ae3355e,39281,1733079178336/rs:state/1733079179549/Put/seqid=0 2024-12-01T18:53:02,786 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-01T18:53:02,787 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-01T18:53:02,789 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-675755677_22 at /127.0.0.1:40394 [Receiving block BP-1910344294-172.17.0.2-1733079174471:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:46831:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40394 dst: /127.0.0.1:46831 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-01T18:53:02,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-01T18:53:02,793 WARN [M:0;9ec37ae3355e:35907 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-01T18:53:02,793 INFO [M:0;9ec37ae3355e:35907 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf24cf628d4f4f4fa2104dcffe252b74 2024-12-01T18:53:02,804 DEBUG [M:0;9ec37ae3355e:35907 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/56e5afdf9701465e9017e01662276e08 as hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/56e5afdf9701465e9017e01662276e08 2024-12-01T18:53:02,813 INFO [M:0;9ec37ae3355e:35907 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/56e5afdf9701465e9017e01662276e08, entries=8, sequenceid=72, filesize=5.5 K 2024-12-01T18:53:02,815 DEBUG [M:0;9ec37ae3355e:35907 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/76314e0a3bdd49c6984911802cd877e2 as hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/76314e0a3bdd49c6984911802cd877e2 2024-12-01T18:53:02,824 INFO [M:0;9ec37ae3355e:35907 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/76314e0a3bdd49c6984911802cd877e2, entries=8, sequenceid=72, filesize=6.3 K 2024-12-01T18:53:02,826 DEBUG [M:0;9ec37ae3355e:35907 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf24cf628d4f4f4fa2104dcffe252b74 as hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf24cf628d4f4f4fa2104dcffe252b74 2024-12-01T18:53:02,836 INFO [M:0;9ec37ae3355e:35907 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf24cf628d4f4f4fa2104dcffe252b74, entries=3, sequenceid=72, filesize=5.2 K 2024-12-01T18:53:02,838 INFO [M:0;9ec37ae3355e:35907 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=72, compaction requested=false 2024-12-01T18:53:02,847 INFO [M:0;9ec37ae3355e:35907 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T18:53:02,847 DEBUG [M:0;9ec37ae3355e:35907 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733079182694Disabling compacts and flushes for region at 1733079182694Disabling writes for close at 1733079182694Obtaining lock to block concurrent updates at 1733079182694Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733079182694Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1733079182694Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733079182695 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733079182696 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733079182712 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733079182712Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733079182731 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733079182748 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733079182748Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733079182768 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733079182784 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733079182784Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bd982f6: reopening flushed file at 1733079182802 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39ed8c4f: reopening flushed file at 1733079182813 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@414ef02f: reopening flushed file at 1733079182824 (+11 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 144ms, sequenceid=72, compaction requested=false at 1733079182838 (+14 ms)Writing region close event to WAL at 1733079182847 (+9 ms)Closed at 1733079182847 2024-12-01T18:53:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46831 is added to blk_1073741825_1011 (size=32683) 2024-12-01T18:53:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33515 is added to blk_1073741825_1011 (size=32683) 2024-12-01T18:53:02,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46223 is added to blk_1073741825_1011 (size=32683) 2024-12-01T18:53:02,853 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T18:53:02,853 INFO [M:0;9ec37ae3355e:35907 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-01T18:53:02,853 INFO [M:0;9ec37ae3355e:35907 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35907 2024-12-01T18:53:02,854 INFO [M:0;9ec37ae3355e:35907 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T18:53:02,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:02,957 INFO [M:0;9ec37ae3355e:35907 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T18:53:02,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35907-0x1016f5f814c0000, quorum=127.0.0.1:53882, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:02,962 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3297a183{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:53:02,964 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2b8a83a2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:53:02,964 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:53:02,965 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17c0da3a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:53:02,965 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@656f7043{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.log.dir/,STOPPED} 2024-12-01T18:53:02,967 WARN [BP-1910344294-172.17.0.2-1733079174471 heartbeating to localhost/127.0.0.1:33839 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:53:02,967 WARN [BP-1910344294-172.17.0.2-1733079174471 heartbeating to localhost/127.0.0.1:33839 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1910344294-172.17.0.2-1733079174471 (Datanode Uuid 0c5e389f-64bb-4742-a919-e3da733d550b) service to localhost/127.0.0.1:33839 2024-12-01T18:53:02,968 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:53:02,968 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:53:02,969 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data5/current/BP-1910344294-172.17.0.2-1733079174471 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:53:02,969 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data6/current/BP-1910344294-172.17.0.2-1733079174471 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:53:02,969 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:53:02,974 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1548acd1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:53:02,974 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3dc20694{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:53:02,974 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:53:02,974 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1fb4f3a3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:53:02,975 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37e44dc6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.log.dir/,STOPPED} 2024-12-01T18:53:02,977 WARN [BP-1910344294-172.17.0.2-1733079174471 heartbeating to localhost/127.0.0.1:33839 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:53:02,977 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:53:02,977 WARN [BP-1910344294-172.17.0.2-1733079174471 heartbeating to localhost/127.0.0.1:33839 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1910344294-172.17.0.2-1733079174471 (Datanode Uuid 28ebedd3-97f8-40f2-9e60-8e3491545b48) service to localhost/127.0.0.1:33839 2024-12-01T18:53:02,977 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:53:02,977 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data3/current/BP-1910344294-172.17.0.2-1733079174471 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:53:02,978 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data4/current/BP-1910344294-172.17.0.2-1733079174471 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:53:02,978 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:53:02,981 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d1a7cf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:53:02,982 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@20b70ca3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:53:02,982 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:53:02,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54f91ad6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:53:02,982 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d0819de{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.log.dir/,STOPPED} 2024-12-01T18:53:02,983 WARN [BP-1910344294-172.17.0.2-1733079174471 heartbeating to localhost/127.0.0.1:33839 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:53:02,983 WARN [BP-1910344294-172.17.0.2-1733079174471 heartbeating to localhost/127.0.0.1:33839 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1910344294-172.17.0.2-1733079174471 (Datanode Uuid de2c5c9c-e165-4965-9dec-85edab90846d) service to localhost/127.0.0.1:33839 2024-12-01T18:53:02,984 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data1/current/BP-1910344294-172.17.0.2-1733079174471 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:53:02,984 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/cluster_f4fa078a-3513-db59-9df6-cb16b22f11b4/data/data2/current/BP-1910344294-172.17.0.2-1733079174471 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-01T18:53:02,984 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-12-01T18:53:02,984 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-01T18:53:02,985 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-01T18:53:02,996 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2ea2dca6{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:53:02,997 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ed9b238{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:53:02,997 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:53:02,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@410292bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:53:02,997 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27f57d6a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.log.dir/,STOPPED} 2024-12-01T18:53:03,007 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-01T18:53:03,048 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-01T18:53:03,059 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=93 (was 160), OpenFileDescriptor=453 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=145 (was 131) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4321 (was 4639) 2024-12-01T18:53:03,066 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=93, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=145, ProcessCount=11, AvailableMemoryMB=4321 2024-12-01T18:53:03,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-01T18:53:03,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.log.dir so I do NOT create it in target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef 2024-12-01T18:53:03,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/2d31fadd-573d-fa69-f3c3-704c96dfad3c/hadoop.tmp.dir so I do NOT create it in target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef 2024-12-01T18:53:03,067 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322, deleteOnExit=true 2024-12-01T18:53:03,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-01T18:53:03,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/test.cache.data in system properties and HBase conf 2024-12-01T18:53:03,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/hadoop.tmp.dir in system properties and HBase conf 2024-12-01T18:53:03,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/hadoop.log.dir in system properties and HBase conf 2024-12-01T18:53:03,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-01T18:53:03,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-01T18:53:03,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-01T18:53:03,069 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-01T18:53:03,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:53:03,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-01T18:53:03,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-01T18:53:03,069 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:53:03,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-01T18:53:03,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-01T18:53:03,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-01T18:53:03,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:53:03,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-01T18:53:03,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/nfs.dump.dir in system properties and HBase conf 2024-12-01T18:53:03,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/java.io.tmpdir in system properties and HBase conf 2024-12-01T18:53:03,070 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-01T18:53:03,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-01T18:53:03,071 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-01T18:53:03,174 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:53:03,179 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:53:03,180 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:53:03,180 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:53:03,181 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:53:03,182 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:53:03,182 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@44402c27{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:53:03,183 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a78787c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:53:03,306 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a29c5ea{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/java.io.tmpdir/jetty-localhost-45181-hadoop-hdfs-3_4_1-tests_jar-_-any-11119678220095517918/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-01T18:53:03,307 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@30b20840{HTTP/1.1, (http/1.1)}{localhost:45181} 2024-12-01T18:53:03,307 INFO [Time-limited test {}] server.Server(415): Started @10714ms 2024-12-01T18:53:03,407 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:53:03,410 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:53:03,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:53:03,411 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:53:03,411 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-01T18:53:03,412 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@95e5bc7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:53:03,412 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@585f6017{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:53:03,529 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@534992c5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/java.io.tmpdir/jetty-localhost-35321-hadoop-hdfs-3_4_1-tests_jar-_-any-16346019506927569188/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:53:03,530 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@67e65606{HTTP/1.1, (http/1.1)}{localhost:35321} 2024-12-01T18:53:03,530 INFO [Time-limited test {}] server.Server(415): Started @10937ms 2024-12-01T18:53:03,531 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:53:03,564 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:53:03,568 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:53:03,568 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:53:03,568 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:53:03,569 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:53:03,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b209404{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:53:03,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77da8076{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:53:03,648 WARN [Thread-522 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data1/current/BP-226986105-172.17.0.2-1733079183114/current, will proceed with Du for space computation calculation, 2024-12-01T18:53:03,649 WARN [Thread-523 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data2/current/BP-226986105-172.17.0.2-1733079183114/current, will proceed with Du for space computation calculation, 2024-12-01T18:53:03,668 WARN [Thread-501 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:53:03,672 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38a4f048ce93335f with lease ID 0xcc2e382813577c34: Processing first storage report for DS-8590338d-c3db-4560-ae1f-da58c27415f2 from datanode DatanodeRegistration(127.0.0.1:42613, datanodeUuid=fbb4068c-d3c3-435f-aef3-b92db5dc2b10, infoPort=33929, infoSecurePort=0, ipcPort=37315, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114) 2024-12-01T18:53:03,672 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38a4f048ce93335f with lease ID 0xcc2e382813577c34: from storage DS-8590338d-c3db-4560-ae1f-da58c27415f2 node DatanodeRegistration(127.0.0.1:42613, datanodeUuid=fbb4068c-d3c3-435f-aef3-b92db5dc2b10, infoPort=33929, infoSecurePort=0, ipcPort=37315, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:53:03,672 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x38a4f048ce93335f with lease ID 0xcc2e382813577c34: Processing first storage report for DS-728dc243-a022-491e-b051-68518097c46c from datanode DatanodeRegistration(127.0.0.1:42613, datanodeUuid=fbb4068c-d3c3-435f-aef3-b92db5dc2b10, infoPort=33929, infoSecurePort=0, ipcPort=37315, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114) 2024-12-01T18:53:03,672 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x38a4f048ce93335f with lease ID 0xcc2e382813577c34: from storage DS-728dc243-a022-491e-b051-68518097c46c node DatanodeRegistration(127.0.0.1:42613, datanodeUuid=fbb4068c-d3c3-435f-aef3-b92db5dc2b10, infoPort=33929, infoSecurePort=0, ipcPort=37315, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:53:03,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6fede0c8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/java.io.tmpdir/jetty-localhost-42635-hadoop-hdfs-3_4_1-tests_jar-_-any-4846257051472764331/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:53:03,716 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@10113c37{HTTP/1.1, (http/1.1)}{localhost:42635} 2024-12-01T18:53:03,716 INFO [Time-limited test {}] server.Server(415): Started @11123ms 2024-12-01T18:53:03,718 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-01T18:53:03,750 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-01T18:53:03,753 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-01T18:53:03,753 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-01T18:53:03,754 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-01T18:53:03,754 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-01T18:53:03,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7dddaf9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/hadoop.log.dir/,AVAILABLE} 2024-12-01T18:53:03,754 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23ef21e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-01T18:53:03,828 WARN [Thread-558 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data4/current/BP-226986105-172.17.0.2-1733079183114/current, will proceed with Du for space computation calculation, 2024-12-01T18:53:03,828 WARN [Thread-557 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data3/current/BP-226986105-172.17.0.2-1733079183114/current, will proceed with Du for space computation calculation, 2024-12-01T18:53:03,852 WARN [Thread-537 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-01T18:53:03,855 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x655f18d70110953e with lease ID 0xcc2e382813577c35: Processing first storage report for DS-a96ace87-131e-4dc9-92f8-3221fef35eba from datanode DatanodeRegistration(127.0.0.1:34415, datanodeUuid=e06a0f37-e470-4de9-9120-e9381a88e12d, infoPort=34883, infoSecurePort=0, ipcPort=33857, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114) 2024-12-01T18:53:03,855 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x655f18d70110953e with lease ID 0xcc2e382813577c35: from storage DS-a96ace87-131e-4dc9-92f8-3221fef35eba node DatanodeRegistration(127.0.0.1:34415, datanodeUuid=e06a0f37-e470-4de9-9120-e9381a88e12d, infoPort=34883, infoSecurePort=0, ipcPort=33857, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:53:03,855 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x655f18d70110953e with lease ID 0xcc2e382813577c35: Processing first storage report for DS-ae1141e7-585e-4df8-8c6a-2310ea8d1d2c from datanode DatanodeRegistration(127.0.0.1:34415, datanodeUuid=e06a0f37-e470-4de9-9120-e9381a88e12d, infoPort=34883, infoSecurePort=0, ipcPort=33857, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114) 2024-12-01T18:53:03,855 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x655f18d70110953e with lease ID 0xcc2e382813577c35: from storage DS-ae1141e7-585e-4df8-8c6a-2310ea8d1d2c node DatanodeRegistration(127.0.0.1:34415, datanodeUuid=e06a0f37-e470-4de9-9120-e9381a88e12d, infoPort=34883, infoSecurePort=0, ipcPort=33857, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:53:03,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4748a603{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/java.io.tmpdir/jetty-localhost-45805-hadoop-hdfs-3_4_1-tests_jar-_-any-8582427115213968171/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:53:03,879 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2da73ce7{HTTP/1.1, (http/1.1)}{localhost:45805} 2024-12-01T18:53:03,879 INFO [Time-limited test {}] server.Server(415): Started @11286ms 2024-12-01T18:53:03,880 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
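The INFO lines above correspond to the next parameterized run (testReadWrite[1]) bringing a fresh mini-cluster up: an HDFS NameNode and three DataNodes with their Jetty web contexts, followed by each DataNode's first block report. A minimal sketch of how such a cluster is requested through HBaseTestingUtil, assuming the StartMiniClusterOption builder mirrors the option string logged earlier (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1); this is an illustration, not the test's actual code:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // Mirrors the option string logged above: 1 master, 3 region servers,
    // 3 data nodes and 1 ZooKeeper server, with no pre-created root/WAL dirs.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();

    util.startMiniCluster(option);  // starts HDFS, ZooKeeper and HBase in-process
    // ... exercise the cluster here, e.g. via util.getConnection() ...
    util.shutdownMiniCluster();     // produces teardown output like the earlier WARN lines
  }
}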
2024-12-01T18:53:03,984 WARN [Thread-584 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data6/current/BP-226986105-172.17.0.2-1733079183114/current, will proceed with Du for space computation calculation, 2024-12-01T18:53:03,984 WARN [Thread-583 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data5/current/BP-226986105-172.17.0.2-1733079183114/current, will proceed with Du for space computation calculation, 2024-12-01T18:53:04,011 WARN [Thread-572 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-01T18:53:04,014 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f11ae2410bce949 with lease ID 0xcc2e382813577c36: Processing first storage report for DS-fe23e852-b927-4f62-ac25-cc383d2d61dd from datanode DatanodeRegistration(127.0.0.1:35625, datanodeUuid=7d3ec84b-fccb-4455-bd38-2c5c9d2834e7, infoPort=45207, infoSecurePort=0, ipcPort=37097, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114) 2024-12-01T18:53:04,014 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f11ae2410bce949 with lease ID 0xcc2e382813577c36: from storage DS-fe23e852-b927-4f62-ac25-cc383d2d61dd node DatanodeRegistration(127.0.0.1:35625, datanodeUuid=7d3ec84b-fccb-4455-bd38-2c5c9d2834e7, infoPort=45207, infoSecurePort=0, ipcPort=37097, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:53:04,014 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f11ae2410bce949 with lease ID 0xcc2e382813577c36: Processing first storage report for DS-ce1ed8e6-478e-48da-a75f-91033ea62737 from datanode DatanodeRegistration(127.0.0.1:35625, datanodeUuid=7d3ec84b-fccb-4455-bd38-2c5c9d2834e7, infoPort=45207, infoSecurePort=0, ipcPort=37097, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114) 2024-12-01T18:53:04,014 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f11ae2410bce949 with lease ID 0xcc2e382813577c36: from storage DS-ce1ed8e6-478e-48da-a75f-91033ea62737 node DatanodeRegistration(127.0.0.1:35625, datanodeUuid=7d3ec84b-fccb-4455-bd38-2c5c9d2834e7, infoPort=45207, infoSecurePort=0, ipcPort=37097, storageInfo=lv=-57;cid=testClusterID;nsid=504844454;c=1733079183114), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-01T18:53:04,109 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef 2024-12-01T18:53:04,112 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/zookeeper_0, clientPort=61333, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-01T18:53:04,113 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=61333 2024-12-01T18:53:04,113 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:53:04,115 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:53:04,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:53:04,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:53:04,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741825_1001 (size=7) 2024-12-01T18:53:04,136 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2 with version=8 2024-12-01T18:53:04,136 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:33839/user/jenkins/test-data/6fc4b871-f76f-a5e3-b5cf-175b4dba1c4a/hbase-staging 2024-12-01T18:53:04,138 INFO [Time-limited test {}] client.ConnectionUtils(128): master/9ec37ae3355e:0 server-side Connection retries=45 2024-12-01T18:53:04,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,138 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:53:04,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,138 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:53:04,138 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-01T18:53:04,138 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:53:04,139 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41027 2024-12-01T18:53:04,140 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41027 connecting to ZooKeeper ensemble=127.0.0.1:61333 2024-12-01T18:53:04,147 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:410270x0, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:53:04,148 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41027-0x1016f5f9d7f0000 connected 2024-12-01T18:53:04,167 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:53:04,169 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:53:04,171 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:04,171 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2, hbase.cluster.distributed=false 2024-12-01T18:53:04,173 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:53:04,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41027 2024-12-01T18:53:04,173 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41027 2024-12-01T18:53:04,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41027 2024-12-01T18:53:04,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41027 2024-12-01T18:53:04,174 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41027 2024-12-01T18:53:04,189 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9ec37ae3355e:0 server-side Connection retries=45 2024-12-01T18:53:04,189 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,189 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,189 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:53:04,189 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,189 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:53:04,189 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:53:04,190 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:53:04,190 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33297 2024-12-01T18:53:04,192 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33297 connecting to ZooKeeper ensemble=127.0.0.1:61333 2024-12-01T18:53:04,193 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:53:04,194 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:53:04,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:332970x0, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:53:04,200 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33297-0x1016f5f9d7f0001 connected 2024-12-01T18:53:04,200 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:04,200 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:53:04,201 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:53:04,201 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:53:04,202 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:53:04,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33297 2024-12-01T18:53:04,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33297 2024-12-01T18:53:04,203 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33297 2024-12-01T18:53:04,204 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33297 2024-12-01T18:53:04,204 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33297 2024-12-01T18:53:04,218 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9ec37ae3355e:0 server-side Connection retries=45 2024-12-01T18:53:04,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,219 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:53:04,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:53:04,219 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:53:04,219 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:53:04,220 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36231 2024-12-01T18:53:04,221 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36231 connecting to ZooKeeper ensemble=127.0.0.1:61333 2024-12-01T18:53:04,222 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:53:04,223 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:53:04,227 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:362310x0, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:53:04,228 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:362310x0, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:04,228 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36231-0x1016f5f9d7f0002 connected 2024-12-01T18:53:04,228 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:53:04,229 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:53:04,229 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
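The master and each region server above bind a NettyRpcServer and open a ZooKeeper session against the mini ensemble on 127.0.0.1:61333. Once they have registered, an ordinary HBase client can reach the same in-process cluster by pointing its configuration at that quorum. A minimal sketch, assuming the client port printed by MiniZooKeeperCluster for this run (61333; it changes on every run) and using a trivial Admin call purely as a sanity check:

import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MiniZkClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Point the client at the mini ZooKeeper quorum; the client port is
    // assigned per test run (61333 in the log above).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "61333");

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Trivial sanity check once the master and region servers have registered.
      System.out.println("Tables: " + Arrays.toString(admin.listTableNames()));
    }
  }
}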
2024-12-01T18:53:04,230 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:53:04,231 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36231 2024-12-01T18:53:04,231 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36231 2024-12-01T18:53:04,231 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36231 2024-12-01T18:53:04,232 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36231 2024-12-01T18:53:04,232 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36231 2024-12-01T18:53:04,248 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/9ec37ae3355e:0 server-side Connection retries=45 2024-12-01T18:53:04,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,248 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-01T18:53:04,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-01T18:53:04,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-01T18:53:04,248 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-01T18:53:04,248 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-01T18:53:04,249 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45713 2024-12-01T18:53:04,250 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45713 connecting to ZooKeeper ensemble=127.0.0.1:61333 2024-12-01T18:53:04,251 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:53:04,253 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:53:04,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:457130x0, quorum=127.0.0.1:61333, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-01T18:53:04,258 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45713-0x1016f5f9d7f0003 connected 2024-12-01T18:53:04,258 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:04,258 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-01T18:53:04,259 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-01T18:53:04,259 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-01T18:53:04,260 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-01T18:53:04,261 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45713 2024-12-01T18:53:04,262 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45713 2024-12-01T18:53:04,263 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45713 2024-12-01T18:53:04,267 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45713 2024-12-01T18:53:04,267 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45713 2024-12-01T18:53:04,279 DEBUG [M:0;9ec37ae3355e:41027 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;9ec37ae3355e:41027 2024-12-01T18:53:04,279 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/9ec37ae3355e,41027,1733079184138 2024-12-01T18:53:04,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:53:04,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:53:04,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:53:04,281 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:53:04,282 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41027-0x1016f5f9d7f0000, 
quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/9ec37ae3355e,41027,1733079184138 2024-12-01T18:53:04,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:53:04,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:53:04,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-01T18:53:04,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,287 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-01T18:53:04,287 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/9ec37ae3355e,41027,1733079184138 from backup master directory 2024-12-01T18:53:04,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/9ec37ae3355e,41027,1733079184138 2024-12-01T18:53:04,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:53:04,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:53:04,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:53:04,289 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable 
HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T18:53:04,289 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=9ec37ae3355e,41027,1733079184138 2024-12-01T18:53:04,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-01T18:53:04,299 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/hbase.id] with ID: 3da2eda5-a335-4802-b39e-a9ed1b8d893d 2024-12-01T18:53:04,299 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/.tmp/hbase.id 2024-12-01T18:53:04,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:53:04,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:53:04,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741826_1002 (size=42) 2024-12-01T18:53:04,311 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/.tmp/hbase.id]:[hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/hbase.id] 2024-12-01T18:53:04,327 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-01T18:53:04,327 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-01T18:53:04,329 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
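The cluster ID bootstrap above uses a write-to-temporary-then-rename pattern: FSUtils writes hbase.id under .tmp and then moves it to its final name, so a reader never observes a partially written ID file, and the three addStoredBlock lines show the small file being replicated to all three DataNodes. A minimal sketch of the same pattern with the plain Hadoop FileSystem API, using shortened illustrative paths and the NameNode port from this run (36899):

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address taken from the log above; it changes on every run.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36899"), conf);

    // Illustrative, shortened paths; the real test uses a per-run test-data dir.
    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
    Path dst = new Path("/user/jenkins/test-data/hbase.id");

    // Write the ID to a temporary file first...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("3da2eda5-a335-4802-b39e-a9ed1b8d893d".getBytes(StandardCharsets.UTF_8));
    }
    // ...then move it into place so the final name only ever holds complete data.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}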
2024-12-01T18:53:04,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:53:04,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:53:04,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741827_1003 (size=196) 2024-12-01T18:53:04,343 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:53:04,344 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-01T18:53:04,345 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:53:04,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is 
added to blk_1073741828_1004 (size=1189) 2024-12-01T18:53:04,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:53:04,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741828_1004 (size=1189) 2024-12-01T18:53:04,358 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store 2024-12-01T18:53:04,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:53:04,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:53:04,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741829_1005 (size=34) 2024-12-01T18:53:04,370 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:53:04,371 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:53:04,371 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:53:04,371 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
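[Editor's note] The master:store descriptor logged above declares the 'info' family with VERSIONS=3, IN_MEMORY=true, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1 and an 8 KB block size. As an illustrative reconstruction only (not the code MasterRegion uses internally), the same attributes can be expressed with the public HBase client builders:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.compress.Compression;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MasterStoreDescriptorSketch {
      public static TableDescriptor build() {
        // 'info' family exactly as logged: 3 versions, in-memory, ROWCOL bloom,
        // ROW_INDEX_V1 encoding, no compression, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setCompressionType(Compression.Algorithm.NONE)
            .setBlocksize(8 * 1024)
            .build();
        // The remaining families from the log (proc, rs, state) would be added the same way
        // with their own logged attributes (1 version, ROW bloom, 64 KB blocks).
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
            .setColumnFamily(info)
            .build();
      }
    }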
2024-12-01T18:53:04,371 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:53:04,371 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:53:04,371 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:53:04,371 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733079184371Disabling compacts and flushes for region at 1733079184371Disabling writes for close at 1733079184371Writing region close event to WAL at 1733079184371Closed at 1733079184371 2024-12-01T18:53:04,372 WARN [master/9ec37ae3355e:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/.initializing 2024-12-01T18:53:04,372 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/WALs/9ec37ae3355e,41027,1733079184138 2024-12-01T18:53:04,376 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9ec37ae3355e%2C41027%2C1733079184138, suffix=, logDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/WALs/9ec37ae3355e,41027,1733079184138, archiveDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/oldWALs, maxLogs=10 2024-12-01T18:53:04,377 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9ec37ae3355e%2C41027%2C1733079184138.1733079184376 2024-12-01T18:53:04,386 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/WALs/9ec37ae3355e,41027,1733079184138/9ec37ae3355e%2C41027%2C1733079184138.1733079184376 2024-12-01T18:53:04,391 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45207:45207),(127.0.0.1/127.0.0.1:33929:33929),(127.0.0.1/127.0.0.1:34883:34883)] 2024-12-01T18:53:04,392 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:53:04,392 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:53:04,392 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,392 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,396 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,398 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-01T18:53:04,398 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:04,399 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:53:04,399 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,401 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-01T18:53:04,401 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:04,401 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:53:04,402 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,404 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-01T18:53:04,405 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:04,405 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:53:04,405 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,407 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-01T18:53:04,407 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:04,407 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:53:04,408 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,408 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,409 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,410 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,410 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,411 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T18:53:04,412 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-01T18:53:04,415 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:53:04,416 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64136996, jitterRate=-0.044284284114837646}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:53:04,416 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733079184392Initializing all the Stores at 1733079184394 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079184394Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079184396 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079184396Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079184396Cleaning up temporary data from old regions at 1733079184411 (+15 ms)Region opened successfully at 1733079184416 (+5 ms) 2024-12-01T18:53:04,417 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-01T18:53:04,421 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e6b7dad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9ec37ae3355e/172.17.0.2:0 2024-12-01T18:53:04,422 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-01T18:53:04,422 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-01T18:53:04,422 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-01T18:53:04,423 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-01T18:53:04,423 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-01T18:53:04,423 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-01T18:53:04,424 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-01T18:53:04,426 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
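[Editor's note] The CompactionConfiguration lines above repeat the same store-level settings for every family: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2 (5.0 off-peak), major compaction period 604800000 ms (7 days) with jitter 0.5. A hedged sketch of reading those values from configuration follows; the property names are the standard hbase-site.xml keys as I understand them and are assumptions here, not something the log states, while the defaults mirror the logged values.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSettingsSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key names are assumptions (conventional HBase keys); defaults match the logged values.
        int minFiles       = conf.getInt("hbase.hstore.compaction.min", 3);
        int maxFiles       = conf.getInt("hbase.hstore.compaction.max", 10);
        double ratio       = conf.getDouble("hbase.hstore.compaction.ratio", 1.2);
        double offPeak     = conf.getDouble("hbase.hstore.compaction.ratio.offpeak", 5.0);
        long majorPeriod   = conf.getLong("hbase.hregion.majorcompaction", 604_800_000L); // 7 days
        double majorJitter = conf.getDouble("hbase.hregion.majorcompaction.jitter", 0.5);
        System.out.printf("files [%d,%d), ratio %.1f / off-peak %.1f, major every %d ms (jitter %.1f)%n",
            minFiles, maxFiles, ratio, offPeak, majorPeriod, majorJitter);
      }
    }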
2024-12-01T18:53:04,427 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-01T18:53:04,428 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-01T18:53:04,429 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-01T18:53:04,429 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-01T18:53:04,432 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-01T18:53:04,432 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-01T18:53:04,433 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-01T18:53:04,434 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-01T18:53:04,435 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-01T18:53:04,436 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-01T18:53:04,438 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-01T18:53:04,439 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-01T18:53:04,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:53:04,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:53:04,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-01T18:53:04,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-01T18:53:04,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,443 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,443 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=9ec37ae3355e,41027,1733079184138, sessionid=0x1016f5f9d7f0000, setting cluster-up flag (Was=false) 2024-12-01T18:53:04,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,454 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-01T18:53:04,455 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9ec37ae3355e,41027,1733079184138 2024-12-01T18:53:04,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,465 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-01T18:53:04,466 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=9ec37ae3355e,41027,1733079184138 2024-12-01T18:53:04,468 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-01T18:53:04,471 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-01T18:53:04,471 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-01T18:53:04,471 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
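[Editor's note] The ZKUtil lines above ("Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)") record the normal probe-before-read pattern: a missing znode is reported as absent and the master falls back to defaults. A minimal sketch of the same behaviour with the plain Apache ZooKeeper client (not HBase's ZKUtil wrapper), using the quorum address from the log:

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeProbeSketch {
      public static void main(String[] args) throws Exception {
        // Connect string as logged for the mini-cluster; the watcher here is a no-op.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:61333", 30_000, event -> { });
        try {
          Stat stat = zk.exists("/hbase/balancer", false); // no watch set
          if (stat == null) {
            // Same situation as the DEBUG lines above: absence is expected, not an error.
            System.out.println("/hbase/balancer does not exist (not necessarily an error)");
          } else {
            byte[] data = zk.getData("/hbase/balancer", false, stat);
            System.out.println("/hbase/balancer has " + data.length + " bytes");
          }
        } finally {
          zk.close();
        }
      }
    }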
2024-12-01T18:53:04,472 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 9ec37ae3355e,41027,1733079184138 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-01T18:53:04,473 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/9ec37ae3355e:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:53:04,473 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/9ec37ae3355e:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:53:04,473 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/9ec37ae3355e:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:53:04,473 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/9ec37ae3355e:0, corePoolSize=5, maxPoolSize=5 2024-12-01T18:53:04,473 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/9ec37ae3355e:0, corePoolSize=10, maxPoolSize=10 2024-12-01T18:53:04,473 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,474 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/9ec37ae3355e:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:53:04,474 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,475 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733079214475 2024-12-01T18:53:04,475 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-01T18:53:04,475 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-01T18:53:04,475 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-01T18:53:04,475 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-01T18:53:04,476 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-01T18:53:04,476 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-01T18:53:04,476 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,476 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:53:04,476 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-01T18:53:04,476 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-01T18:53:04,477 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-01T18:53:04,477 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-01T18:53:04,477 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:04,477 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-01T18:53:04,478 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-01T18:53:04,478 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-01T18:53:04,478 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.large.0-1733079184478,5,FailOnTimeoutGroup] 2024-12-01T18:53:04,479 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.small.0-1733079184478,5,FailOnTimeoutGroup] 2024-12-01T18:53:04,479 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,479 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-01T18:53:04,479 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,479 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741831_1007 (size=1321) 2024-12-01T18:53:04,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741831_1007 (size=1321) 2024-12-01T18:53:04,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741831_1007 (size=1321) 2024-12-01T18:53:04,491 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-01T18:53:04,491 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2 2024-12-01T18:53:04,499 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:53:04,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:53:04,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741832_1008 (size=32) 2024-12-01T18:53:04,500 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:53:04,502 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:53:04,503 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:53:04,504 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:04,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:53:04,504 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T18:53:04,506 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T18:53:04,506 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
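[Editor's note] Every block written so far is acknowledged by three datanodes (for example blk_1073741832_1008 on 127.0.0.1:34415, :42613 and :35625), i.e. the usual replication factor of 3 in this mini-cluster. An illustrative way to confirm a file's replication from a client with the public FileSystem API; the path is one of the files created above, and the class itself is only a sketch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ReplicationCheckSketch {
      public static void main(String[] args) throws Exception {
        // fs.defaultFS would be hdfs://localhost:36899 for this test run.
        FileSystem fs = FileSystem.get(new Configuration());
        Path tableInfo = new Path(
            "/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321");
        FileStatus st = fs.getFileStatus(tableInfo);
        // A replication of 3 corresponds to the three addStoredBlock lines per block in the log.
        System.out.println(tableInfo + " replication=" + st.getReplication() + " len=" + st.getLen());
      }
    }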
2024-12-01T18:53:04,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:53:04,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:53:04,508 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:53:04,508 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:04,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:53:04,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:53:04,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:53:04,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:04,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:53:04,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T18:53:04,512 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740 2024-12-01T18:53:04,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740 2024-12-01T18:53:04,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T18:53:04,515 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T18:53:04,516 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T18:53:04,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T18:53:04,520 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:53:04,520 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67652999, jitterRate=0.008108243346214294}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:53:04,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733079184500Initializing all the Stores at 1733079184501 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079184501Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079184501Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079184501Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079184501Cleaning up temporary data from old regions at 1733079184515 (+14 ms)Region opened successfully at 1733079184521 (+6 ms) 2024-12-01T18:53:04,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 
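[Editor's note] The two "Opened ... SteppingSplitPolicy" entries report a desiredMaxFileSize together with a jitterRate: 64136996 with jitterRate=-0.04428... for master:store, and 67652999 with jitterRate=0.00811... for hbase:meta. Both numbers are consistent with a configured split size of 64 MB (67108864 bytes) perturbed by the jitter, desiredMaxFileSize ≈ 67108864 × (1 + jitterRate); the 64 MB base is an inference from the arithmetic, not something stated in the log. A tiny check:

    public class SplitJitterCheck {
      public static void main(String[] args) {
        long base = 64L * 1024 * 1024; // 67108864 bytes, inferred base split size
        double[] jitterRates = {-0.044284284114837646, 0.008108243346214294}; // values from the log
        for (double j : jitterRates) {
          long desired = Math.round(base * (1.0 + j));
          // Prints 64136996 and 67652999, matching the logged desiredMaxFileSize values.
          System.out.println("jitterRate=" + j + " -> desiredMaxFileSize=" + desired);
        }
      }
    }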
2024-12-01T18:53:04,521 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-01T18:53:04,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T18:53:04,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:53:04,521 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:53:04,521 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T18:53:04,522 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733079184521Disabling compacts and flushes for region at 1733079184521Disabling writes for close at 1733079184521Writing region close event to WAL at 1733079184521Closed at 1733079184521 2024-12-01T18:53:04,524 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:53:04,524 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-01T18:53:04,524 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-01T18:53:04,526 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-01T18:53:04,527 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-01T18:53:04,569 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer(746): ClusterId : 3da2eda5-a335-4802-b39e-a9ed1b8d893d 2024-12-01T18:53:04,569 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(746): ClusterId : 3da2eda5-a335-4802-b39e-a9ed1b8d893d 2024-12-01T18:53:04,569 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:53:04,569 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:53:04,569 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(746): ClusterId : 3da2eda5-a335-4802-b39e-a9ed1b8d893d 2024-12-01T18:53:04,569 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-01T18:53:04,571 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:53:04,571 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:53:04,571 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-01T18:53:04,571 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 
2024-12-01T18:53:04,571 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:53:04,572 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-01T18:53:04,577 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:53:04,577 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:53:04,577 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-01T18:53:04,578 DEBUG [RS:2;9ec37ae3355e:45713 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cbedcda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9ec37ae3355e/172.17.0.2:0 2024-12-01T18:53:04,578 DEBUG [RS:0;9ec37ae3355e:33297 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@577e5775, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9ec37ae3355e/172.17.0.2:0 2024-12-01T18:53:04,578 DEBUG [RS:1;9ec37ae3355e:36231 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43965310, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=9ec37ae3355e/172.17.0.2:0 2024-12-01T18:53:04,590 DEBUG [RS:2;9ec37ae3355e:45713 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;9ec37ae3355e:45713 2024-12-01T18:53:04,590 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T18:53:04,590 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T18:53:04,590 DEBUG [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-01T18:53:04,591 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(2659): reportForDuty to master=9ec37ae3355e,41027,1733079184138 with port=45713, startcode=1733079184247 2024-12-01T18:53:04,591 DEBUG [RS:2;9ec37ae3355e:45713 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:53:04,593 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41909, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:53:04,594 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41027 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9ec37ae3355e,45713,1733079184247 2024-12-01T18:53:04,594 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41027 {}] master.ServerManager(517): Registering regionserver=9ec37ae3355e,45713,1733079184247 2024-12-01T18:53:04,595 DEBUG [RS:0;9ec37ae3355e:33297 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;9ec37ae3355e:33297 2024-12-01T18:53:04,595 DEBUG [RS:1;9ec37ae3355e:36231 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;9ec37ae3355e:36231 2024-12-01T18:53:04,595 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T18:53:04,595 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-01T18:53:04,595 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T18:53:04,595 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-01T18:53:04,595 DEBUG [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-01T18:53:04,596 DEBUG [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-01T18:53:04,596 DEBUG [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2 2024-12-01T18:53:04,596 DEBUG [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36899 2024-12-01T18:53:04,596 DEBUG [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T18:53:04,596 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(2659): reportForDuty to master=9ec37ae3355e,41027,1733079184138 with port=36231, startcode=1733079184218 2024-12-01T18:53:04,596 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer(2659): reportForDuty to master=9ec37ae3355e,41027,1733079184138 with port=33297, startcode=1733079184189 2024-12-01T18:53:04,597 DEBUG [RS:0;9ec37ae3355e:33297 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:53:04,597 DEBUG [RS:1;9ec37ae3355e:36231 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-01T18:53:04,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:53:04,598 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38321, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:53:04,599 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55893, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-01T18:53:04,599 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41027 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9ec37ae3355e,36231,1733079184218 2024-12-01T18:53:04,599 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41027 {}] master.ServerManager(517): Registering regionserver=9ec37ae3355e,36231,1733079184218 2024-12-01T18:53:04,599 DEBUG [RS:2;9ec37ae3355e:45713 {}] zookeeper.ZKUtil(111): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9ec37ae3355e,45713,1733079184247 2024-12-01T18:53:04,599 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9ec37ae3355e,45713,1733079184247] 2024-12-01T18:53:04,599 WARN [RS:2;9ec37ae3355e:45713 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
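The registration sequence above is ZooKeeper-backed: each RegionServer creates an ephemeral znode under /hbase/rs on the quorum at 127.0.0.1:61333, and the master's RegionServerTracker reacts to the NodeChildrenChanged events. When debugging a test cluster like this one, the same znodes can be listed with the plain ZooKeeper client; this is an illustrative sketch, not HBase's own ZKUtil usage, and the quorum address is the one from this run.

// Illustrative only: list the ephemeral /hbase/rs znodes whose creation is logged above.
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServerZNodes {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:61333", 30000, event -> { });
    try {
      List<String> servers = zk.getChildren("/hbase/rs", false);
      servers.forEach(System.out::println); // entries like 9ec37ae3355e,45713,1733079184247
    } finally {
      zk.close();
    }
  }
}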
2024-12-01T18:53:04,599 INFO [RS:2;9ec37ae3355e:45713 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:53:04,599 DEBUG [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/WALs/9ec37ae3355e,45713,1733079184247 2024-12-01T18:53:04,601 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41027 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 9ec37ae3355e,33297,1733079184189 2024-12-01T18:53:04,601 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41027 {}] master.ServerManager(517): Registering regionserver=9ec37ae3355e,33297,1733079184189 2024-12-01T18:53:04,601 DEBUG [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2 2024-12-01T18:53:04,601 DEBUG [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36899 2024-12-01T18:53:04,601 DEBUG [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T18:53:04,603 DEBUG [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2 2024-12-01T18:53:04,603 DEBUG [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36899 2024-12-01T18:53:04,603 DEBUG [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-01T18:53:04,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:53:04,607 DEBUG [RS:1;9ec37ae3355e:36231 {}] zookeeper.ZKUtil(111): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9ec37ae3355e,36231,1733079184218 2024-12-01T18:53:04,607 WARN [RS:1;9ec37ae3355e:36231 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-01T18:53:04,607 INFO [RS:1;9ec37ae3355e:36231 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:53:04,607 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9ec37ae3355e,36231,1733079184218] 2024-12-01T18:53:04,607 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [9ec37ae3355e,33297,1733079184189] 2024-12-01T18:53:04,607 DEBUG [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/WALs/9ec37ae3355e,36231,1733079184218 2024-12-01T18:53:04,608 DEBUG [RS:0;9ec37ae3355e:33297 {}] zookeeper.ZKUtil(111): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/9ec37ae3355e,33297,1733079184189 2024-12-01T18:53:04,608 WARN [RS:0;9ec37ae3355e:33297 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-01T18:53:04,608 INFO [RS:0;9ec37ae3355e:33297 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:53:04,608 DEBUG [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/WALs/9ec37ae3355e,33297,1733079184189 2024-12-01T18:53:04,608 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:53:04,612 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:53:04,614 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:53:04,616 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:53:04,616 INFO [RS:2;9ec37ae3355e:45713 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:53:04,617 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,618 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-01T18:53:04,621 INFO [RS:1;9ec37ae3355e:36231 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:53:04,621 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T18:53:04,621 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
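The MemStoreFlusher numbers above (globalMemStoreLimit=880 M, low mark 836 M) follow from the heap size and two configuration fractions: hbase.regionserver.global.memstore.size (by default 0.4 of the heap) and hbase.regionserver.global.memstore.size.lower.limit (by default 0.95 of that limit). The defaults and the implied roughly 2.2 GB heap are assumptions consistent with the logged values, not settings read from this cluster.

// Back-of-the-envelope sketch of the 880 M / 836 M pair, under assumed defaults.
public class GlobalMemStoreLimitSketch {
  public static void main(String[] args) {
    double heapMb = 2200;               // implied heap size (assumption)
    double limitMb = heapMb * 0.4;      // globalMemStoreLimit        -> 880
    double lowMarkMb = limitMb * 0.95;  // globalMemStoreLimitLowMark -> 836
    System.out.printf("limit=%.0f M, lowMark=%.0f M%n", limitMb, lowMarkMb);
  }
}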
2024-12-01T18:53:04,621 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-01T18:53:04,621 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T18:53:04,621 INFO [RS:0;9ec37ae3355e:33297 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-01T18:53:04,621 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,622 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-01T18:53:04,622 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T18:53:04,622 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,622 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T18:53:04,622 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,622 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,622 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,622 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,622 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,622 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,622 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,622 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,622 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9ec37ae3355e:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:53:04,623 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:1;9ec37ae3355e:36231 {}] 
executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9ec37ae3355e:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:53:04,623 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9ec37ae3355e:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:53:04,623 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:2;9ec37ae3355e:45713 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:53:04,623 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,623 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,624 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9ec37ae3355e:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:53:04,624 DEBUG [RS:1;9ec37ae3355e:36231 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0, 
corePoolSize=3, maxPoolSize=3 2024-12-01T18:53:04,624 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,624 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-01T18:53:04,624 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,624 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,624 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,624 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,624 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,624 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,624 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,45713,1733079184247-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:53:04,624 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,624 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,624 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,624 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,624 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/9ec37ae3355e:0, corePoolSize=2, maxPoolSize=2 2024-12-01T18:53:04,624 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,625 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,625 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,625 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,625 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,625 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/9ec37ae3355e:0, corePoolSize=1, maxPoolSize=1 2024-12-01T18:53:04,625 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/9ec37ae3355e:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:53:04,625 DEBUG [RS:0;9ec37ae3355e:33297 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0, corePoolSize=3, maxPoolSize=3 2024-12-01T18:53:04,628 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,628 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,628 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,628 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,628 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,628 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,628 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,36231,1733079184218-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:53:04,628 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,628 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,628 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,628 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,628 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,33297,1733079184189-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-01T18:53:04,642 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:53:04,642 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,36231,1733079184218-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,643 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
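The ChoreService lines above are periodic background tasks: CompactionChecker and MemstoreFlusherChore fire every 1000 ms, ExecutorStatusChore every 60 s, CompactedHFilesCleaner every 120 s, and so on. As a rough analogy in plain Java, deliberately not HBase's ChoreService API, the same fixed-period scheduling pattern looks like this:

// Plain-Java analogy to the chores scheduled above (not HBase's ChoreService API).
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreAnalogy {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService pool = Executors.newScheduledThreadPool(1);
    // Same shape as CompactionChecker's 1000 ms period in the log.
    pool.scheduleAtFixedRate(() -> System.out.println("compaction check"),
        0, 1000, TimeUnit.MILLISECONDS);
    TimeUnit.SECONDS.sleep(5); // let it tick a few times, then stop
    pool.shutdown();
  }
}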
2024-12-01T18:53:04,643 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.Replication(171): 9ec37ae3355e,36231,1733079184218 started 2024-12-01T18:53:04,646 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:53:04,647 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,45713,1733079184247-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,647 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,647 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.Replication(171): 9ec37ae3355e,45713,1733079184247 started 2024-12-01T18:53:04,653 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-01T18:53:04,653 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,33297,1733079184189-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,653 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,654 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.Replication(171): 9ec37ae3355e,33297,1733079184189 started 2024-12-01T18:53:04,657 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,657 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(1482): Serving as 9ec37ae3355e,36231,1733079184218, RpcServer on 9ec37ae3355e/172.17.0.2:36231, sessionid=0x1016f5f9d7f0002 2024-12-01T18:53:04,657 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:53:04,657 DEBUG [RS:1;9ec37ae3355e:36231 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9ec37ae3355e,36231,1733079184218 2024-12-01T18:53:04,657 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,36231,1733079184218' 2024-12-01T18:53:04,657 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:53:04,658 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:53:04,659 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:53:04,659 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:53:04,659 DEBUG [RS:1;9ec37ae3355e:36231 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9ec37ae3355e,36231,1733079184218 2024-12-01T18:53:04,659 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,36231,1733079184218' 2024-12-01T18:53:04,659 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:53:04,659 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under 
znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:53:04,660 DEBUG [RS:1;9ec37ae3355e:36231 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:53:04,660 INFO [RS:1;9ec37ae3355e:36231 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:53:04,660 INFO [RS:1;9ec37ae3355e:36231 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T18:53:04,668 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:04,668 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(1482): Serving as 9ec37ae3355e,45713,1733079184247, RpcServer on 9ec37ae3355e/172.17.0.2:45713, sessionid=0x1016f5f9d7f0003 2024-12-01T18:53:04,668 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:53:04,668 DEBUG [RS:2;9ec37ae3355e:45713 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9ec37ae3355e,45713,1733079184247 2024-12-01T18:53:04,668 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,45713,1733079184247' 2024-12-01T18:53:04,668 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:53:04,669 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:53:04,670 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:53:04,670 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:53:04,670 DEBUG [RS:2;9ec37ae3355e:45713 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9ec37ae3355e,45713,1733079184247 2024-12-01T18:53:04,670 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,45713,1733079184247' 2024-12-01T18:53:04,670 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:53:04,670 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:53:04,671 DEBUG [RS:2;9ec37ae3355e:45713 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:53:04,671 INFO [RS:2;9ec37ae3355e:45713 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:53:04,671 INFO [RS:2;9ec37ae3355e:45713 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T18:53:04,673 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
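The flush-table-proc and online-snapshot members above are the RegionServer side of ZooKeeper-coordinated procedures; they watch /hbase/flush-table-proc/acquired and /hbase/online-snapshot/acquired for work handed out by the master. A hedged sketch of a client call that requests a table flush through the public Admin API follows; depending on the HBase version this may be coordinated through those ZK members or through a master-side procedure, and the quorum values are the ones from this test run.

// Illustrative client-side flush request against the cluster whose startup is logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "61333");
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      admin.flush(TableName.valueOf("hbase:meta")); // asks the cluster to flush the table
    }
  }
}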
2024-12-01T18:53:04,673 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer(1482): Serving as 9ec37ae3355e,33297,1733079184189, RpcServer on 9ec37ae3355e/172.17.0.2:33297, sessionid=0x1016f5f9d7f0001 2024-12-01T18:53:04,673 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-01T18:53:04,673 DEBUG [RS:0;9ec37ae3355e:33297 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 9ec37ae3355e,33297,1733079184189 2024-12-01T18:53:04,673 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,33297,1733079184189' 2024-12-01T18:53:04,673 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-01T18:53:04,674 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-01T18:53:04,674 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-01T18:53:04,674 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-01T18:53:04,674 DEBUG [RS:0;9ec37ae3355e:33297 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 9ec37ae3355e,33297,1733079184189 2024-12-01T18:53:04,674 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '9ec37ae3355e,33297,1733079184189' 2024-12-01T18:53:04,674 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-01T18:53:04,675 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-01T18:53:04,675 DEBUG [RS:0;9ec37ae3355e:33297 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-01T18:53:04,675 INFO [RS:0;9ec37ae3355e:33297 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-01T18:53:04,675 INFO [RS:0;9ec37ae3355e:33297 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-01T18:53:04,678 WARN [9ec37ae3355e:41027 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-12-01T18:53:04,762 INFO [RS:1;9ec37ae3355e:36231 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9ec37ae3355e%2C36231%2C1733079184218, suffix=, logDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/WALs/9ec37ae3355e,36231,1733079184218, archiveDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/oldWALs, maxLogs=32 2024-12-01T18:53:04,764 INFO [RS:1;9ec37ae3355e:36231 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9ec37ae3355e%2C36231%2C1733079184218.1733079184764 2024-12-01T18:53:04,772 INFO [RS:1;9ec37ae3355e:36231 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/WALs/9ec37ae3355e,36231,1733079184218/9ec37ae3355e%2C36231%2C1733079184218.1733079184764 2024-12-01T18:53:04,773 INFO [RS:2;9ec37ae3355e:45713 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9ec37ae3355e%2C45713%2C1733079184247, suffix=, logDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/WALs/9ec37ae3355e,45713,1733079184247, archiveDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/oldWALs, maxLogs=32 2024-12-01T18:53:04,774 DEBUG [RS:1;9ec37ae3355e:36231 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34883:34883),(127.0.0.1/127.0.0.1:33929:33929),(127.0.0.1/127.0.0.1:45207:45207)] 2024-12-01T18:53:04,775 INFO [RS:2;9ec37ae3355e:45713 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9ec37ae3355e%2C45713%2C1733079184247.1733079184774 2024-12-01T18:53:04,777 INFO [RS:0;9ec37ae3355e:33297 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9ec37ae3355e%2C33297%2C1733079184189, suffix=, logDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/WALs/9ec37ae3355e,33297,1733079184189, archiveDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/oldWALs, maxLogs=32 2024-12-01T18:53:04,778 INFO [RS:0;9ec37ae3355e:33297 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 9ec37ae3355e%2C33297%2C1733079184189.1733079184778 2024-12-01T18:53:04,783 INFO [RS:2;9ec37ae3355e:45713 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/WALs/9ec37ae3355e,45713,1733079184247/9ec37ae3355e%2C45713%2C1733079184247.1733079184774 2024-12-01T18:53:04,785 DEBUG [RS:2;9ec37ae3355e:45713 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33929:33929),(127.0.0.1/127.0.0.1:34883:34883),(127.0.0.1/127.0.0.1:45207:45207)] 2024-12-01T18:53:04,785 INFO [RS:0;9ec37ae3355e:33297 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/WALs/9ec37ae3355e,33297,1733079184189/9ec37ae3355e%2C33297%2C1733079184189.1733079184778 2024-12-01T18:53:04,786 DEBUG [RS:0;9ec37ae3355e:33297 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34883:34883),(127.0.0.1/127.0.0.1:45207:45207),(127.0.0.1/127.0.0.1:33929:33929)] 2024-12-01T18:53:04,928 DEBUG [9ec37ae3355e:41027 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-01T18:53:04,928 DEBUG [9ec37ae3355e:41027 {}] balancer.BalancerClusterState(204): Hosts are {9ec37ae3355e=0} racks are {/default-rack=0} 2024-12-01T18:53:04,931 DEBUG [9ec37ae3355e:41027 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T18:53:04,931 DEBUG [9ec37ae3355e:41027 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T18:53:04,931 DEBUG [9ec37ae3355e:41027 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T18:53:04,931 DEBUG [9ec37ae3355e:41027 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T18:53:04,931 DEBUG [9ec37ae3355e:41027 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T18:53:04,931 DEBUG [9ec37ae3355e:41027 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T18:53:04,931 INFO [9ec37ae3355e:41027 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T18:53:04,931 INFO [9ec37ae3355e:41027 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T18:53:04,931 INFO [9ec37ae3355e:41027 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T18:53:04,931 DEBUG [9ec37ae3355e:41027 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T18:53:04,931 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=9ec37ae3355e,45713,1733079184247 2024-12-01T18:53:04,933 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9ec37ae3355e,45713,1733079184247, state=OPENING 2024-12-01T18:53:04,935 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-01T18:53:04,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:04,937 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:04,937 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:04,937 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:04,937 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:04,937 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, 
region=1588230740, ASSIGN 2024-12-01T18:53:04,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=9ec37ae3355e,45713,1733079184247}] 2024-12-01T18:53:05,092 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T18:53:05,094 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48717, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T18:53:05,098 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-01T18:53:05,099 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-01T18:53:05,101 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=9ec37ae3355e%2C45713%2C1733079184247.meta, suffix=.meta, logDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/WALs/9ec37ae3355e,45713,1733079184247, archiveDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/oldWALs, maxLogs=32 2024-12-01T18:53:05,102 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 9ec37ae3355e%2C45713%2C1733079184247.meta.1733079185102.meta 2024-12-01T18:53:05,110 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/WALs/9ec37ae3355e,45713,1733079184247/9ec37ae3355e%2C45713%2C1733079184247.meta.1733079185102.meta 2024-12-01T18:53:05,111 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34883:34883),(127.0.0.1/127.0.0.1:45207:45207),(127.0.0.1/127.0.0.1:33929:33929)] 2024-12-01T18:53:05,111 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:53:05,112 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-01T18:53:05,112 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-01T18:53:05,112 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
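The WAL lines above (both the per-RegionServer WALs and the meta WAL just created) report blocksize=256 MB, rollsize=128 MB, maxLogs=32, with FSHLogProvider as the provider. A hedged sketch of the knobs usually behind those numbers; the property names are the commonly documented ones but should be treated as assumptions here and checked against hbase-default.xml for the version in use.

// Hedged sketch: configuration keys commonly associated with the WAL sizing logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // assumed key: WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // assumed key: 256 MB * 0.5 = 128 MB roll size
    conf.setInt("hbase.regionserver.maxlogs", 32);                         // cap on retained WAL files
    System.out.println(conf.get("hbase.regionserver.maxlogs"));
  }
}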
2024-12-01T18:53:05,112 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-01T18:53:05,112 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:53:05,112 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-01T18:53:05,112 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-01T18:53:05,114 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-01T18:53:05,115 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-01T18:53:05,115 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:05,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:53:05,116 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-01T18:53:05,117 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-01T18:53:05,117 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:05,117 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:53:05,118 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-01T18:53:05,118 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-01T18:53:05,119 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:05,119 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-01T18:53:05,119 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-01T18:53:05,120 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-01T18:53:05,120 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:05,121 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
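The CompactionConfiguration lines repeated for each hbase:meta column family carry the standard store-compaction parameters: at least 3 and at most 10 files per compaction, ratio 1.2 (5.0 off-peak), 128 MB minimum compact size. A sketch of the usual tuning keys for those values; the values shown simply mirror what this run logged and are not a tuning recommendation.

// Sketch of the standard compaction tuning keys matching the values logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
    System.out.println(conf.get("hbase.hstore.compaction.ratio"));
  }
}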
2024-12-01T18:53:05,121 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-01T18:53:05,122 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740 2024-12-01T18:53:05,123 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740 2024-12-01T18:53:05,125 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-01T18:53:05,125 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-01T18:53:05,125 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-01T18:53:05,127 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-01T18:53:05,128 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69342585, jitterRate=0.03328503668308258}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-01T18:53:05,128 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-01T18:53:05,129 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733079185112Writing region info on filesystem at 1733079185112Initializing all the Stores at 1733079185114 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079185114Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079185114Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079185114Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733079185114Cleaning up temporary data from old regions at 1733079185125 (+11 ms)Running coprocessor post-open hooks at 1733079185128 (+3 ms)Region opened successfully at 1733079185129 (+1 ms) 2024-12-01T18:53:05,131 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733079185091 2024-12-01T18:53:05,134 DEBUG [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-01T18:53:05,134 INFO [RS_OPEN_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-01T18:53:05,135 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=9ec37ae3355e,45713,1733079184247 2024-12-01T18:53:05,137 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 9ec37ae3355e,45713,1733079184247, state=OPEN 2024-12-01T18:53:05,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:53:05,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:53:05,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:53:05,139 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-01T18:53:05,139 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=9ec37ae3355e,45713,1733079184247 2024-12-01T18:53:05,139 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:05,139 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:05,139 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:05,139 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-01T18:53:05,143 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-01T18:53:05,143 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=9ec37ae3355e,45713,1733079184247 in 202 msec 2024-12-01T18:53:05,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-01T18:53:05,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 619 msec 2024-12-01T18:53:05,148 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-01T18:53:05,148 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-01T18:53:05,150 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T18:53:05,150 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9ec37ae3355e,45713,1733079184247, seqNum=-1] 2024-12-01T18:53:05,150 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:53:05,152 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58677, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:53:05,160 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 689 msec 2024-12-01T18:53:05,160 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733079185160, completionTime=-1 2024-12-01T18:53:05,161 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-01T18:53:05,161 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
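
Note: InitMetaProcedure reports that it is about to create the 'default' and 'hbase' namespaces. A minimal, hedged way for test code to confirm both exist once the master is up is sketched below; `conn` is assumed to be a Connection to this mini cluster and is not part of the log.

    // Sketch: list the namespaces InitMetaProcedure is expected to have created.
    import java.io.IOException;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public final class ListNamespacesSketch {
      static void printNamespaces(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName()); // expect "default" and "hbase"
          }
        }
      }
    }
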
2024-12-01T18:53:05,163 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-01T18:53:05,163 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733079245163 2024-12-01T18:53:05,163 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733079305163 2024-12-01T18:53:05,163 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-01T18:53:05,163 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,41027,1733079184138-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:05,163 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,41027,1733079184138-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:05,163 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,41027,1733079184138-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:05,163 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-9ec37ae3355e:41027, period=300000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:05,163 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:05,164 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-01T18:53:05,165 DEBUG [master/9ec37ae3355e:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-01T18:53:05,168 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.878sec 2024-12-01T18:53:05,168 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-01T18:53:05,168 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-01T18:53:05,168 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-01T18:53:05,168 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-01T18:53:05,169 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-01T18:53:05,169 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,41027,1733079184138-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
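
Note: the chores registered here (ClusterStatusChore, BalancerChore, CatalogJanitor, HbckChore, and so on) follow the ScheduledChore/ChoreService pattern. A rough sketch of that pattern is below; the chore name and the 60-second period are invented for illustration and nothing in it is taken from the master's actual chore implementations.

    // Sketch of the ScheduledChore pattern behind the chores listed in the log.
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class DemoChore extends ScheduledChore {
      public DemoChore(Stoppable stopper) {
        super("DemoChore", stopper, 60_000); // name and period are illustrative
      }

      @Override
      protected void chore() {
        // periodic work; a ChoreService invokes this on the configured schedule
      }
    }
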
2024-12-01T18:53:05,169 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,41027,1733079184138-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-01T18:53:05,169 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58397a7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:53:05,169 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 9ec37ae3355e,41027,-1 for getting cluster id 2024-12-01T18:53:05,169 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-01T18:53:05,171 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3da2eda5-a335-4802-b39e-a9ed1b8d893d' 2024-12-01T18:53:05,171 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-01T18:53:05,171 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3da2eda5-a335-4802-b39e-a9ed1b8d893d" 2024-12-01T18:53:05,172 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-01T18:53:05,172 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-01T18:53:05,172 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20834f03, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:53:05,172 INFO [master/9ec37ae3355e:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=9ec37ae3355e,41027,1733079184138-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-01T18:53:05,172 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [9ec37ae3355e,41027,-1] 2024-12-01T18:53:05,172 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-01T18:53:05,172 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:05,174 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50506, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-01T18:53:05,174 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4ce6234c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-01T18:53:05,175 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-01T18:53:05,176 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=9ec37ae3355e,45713,1733079184247, seqNum=-1] 2024-12-01T18:53:05,176 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:53:05,178 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42250, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:53:05,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=9ec37ae3355e,41027,1733079184138 2024-12-01T18:53:05,181 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-01T18:53:05,182 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 9ec37ae3355e,41027,1733079184138 2024-12-01T18:53:05,182 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5e7fdfff 2024-12-01T18:53:05,182 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-01T18:53:05,184 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50520, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-01T18:53:05,185 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-01T18:53:05,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 
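
Note: the create request logged by HMaster above describes a single-family table with REGION_REPLICATION => '1'. A hedged reconstruction of the client call that produces such a request is shown below; the actual code in TestHBaseWalOnEC may differ, and `conn` is assumed to be a Connection to the mini cluster.

    // Sketch: create a one-family table equivalent to the request logged by HMaster.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public final class CreateTableSketch {
      static void createTable(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)                                 // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // family defaults as logged
              .build());
        }
      }
    }
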
2024-12-01T18:53:05,188 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-01T18:53:05,189 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:05,189 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-01T18:53:05,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T18:53:05,190 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-01T18:53:05,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741837_1013 (size=392) 2024-12-01T18:53:05,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741837_1013 (size=392) 2024-12-01T18:53:05,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741837_1013 (size=392) 2024-12-01T18:53:05,206 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 816334c3ed5b276da72bbb808d9f822d, NAME => 'TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2 2024-12-01T18:53:05,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741838_1014 (size=51) 2024-12-01T18:53:05,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741838_1014 (size=51) 2024-12-01T18:53:05,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741838_1014 (size=51) 2024-12-01T18:53:05,221 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:53:05,221 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 816334c3ed5b276da72bbb808d9f822d, disabling compactions & flushes 2024-12-01T18:53:05,221 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): 
Closing region TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 2024-12-01T18:53:05,221 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 2024-12-01T18:53:05,221 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. after waiting 0 ms 2024-12-01T18:53:05,221 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 2024-12-01T18:53:05,221 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 2024-12-01T18:53:05,221 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 816334c3ed5b276da72bbb808d9f822d: Waiting for close lock at 1733079185221Disabling compacts and flushes for region at 1733079185221Disabling writes for close at 1733079185221Writing region close event to WAL at 1733079185221Closed at 1733079185221 2024-12-01T18:53:05,223 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-01T18:53:05,224 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733079185223"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733079185223"}]},"ts":"1733079185223"} 2024-12-01T18:53:05,227 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
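
Note: once CREATE_TABLE_ADD_TO_META has written the region row, clients can see the new region through the region locator. A hedged sketch follows, again assuming `conn` is a Connection to this cluster.

    // Sketch: read back the region that was just added to hbase:meta.
    import java.io.IOException;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public final class LocateRegionsSketch {
      static void printRegions(Connection conn) throws IOException {
        try (RegionLocator locator =
            conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getEncodedName() + " @ " + loc.getServerName());
          }
        }
      }
    }
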
2024-12-01T18:53:05,229 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-01T18:53:05,229 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733079185229"}]},"ts":"1733079185229"} 2024-12-01T18:53:05,232 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-01T18:53:05,233 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {9ec37ae3355e=0} racks are {/default-rack=0} 2024-12-01T18:53:05,233 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-01T18:53:05,233 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-01T18:53:05,234 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-01T18:53:05,234 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-01T18:53:05,234 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-01T18:53:05,234 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-01T18:53:05,234 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-01T18:53:05,234 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-01T18:53:05,234 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-01T18:53:05,234 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-01T18:53:05,234 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=816334c3ed5b276da72bbb808d9f822d, ASSIGN}] 2024-12-01T18:53:05,236 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=816334c3ed5b276da72bbb808d9f822d, ASSIGN 2024-12-01T18:53:05,238 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=816334c3ed5b276da72bbb808d9f822d, ASSIGN; state=OFFLINE, location=9ec37ae3355e,36231,1733079184218; forceNewPlan=false, retain=false 2024-12-01T18:53:05,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T18:53:05,388 INFO [9ec37ae3355e:41027 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-01T18:53:05,389 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=816334c3ed5b276da72bbb808d9f822d, regionState=OPENING, regionLocation=9ec37ae3355e,36231,1733079184218 2024-12-01T18:53:05,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=816334c3ed5b276da72bbb808d9f822d, ASSIGN because future has completed 2024-12-01T18:53:05,393 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 816334c3ed5b276da72bbb808d9f822d, server=9ec37ae3355e,36231,1733079184218}] 2024-12-01T18:53:05,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T18:53:05,547 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-01T18:53:05,549 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59401, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-01T18:53:05,553 INFO [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 2024-12-01T18:53:05,554 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 816334c3ed5b276da72bbb808d9f822d, NAME => 'TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d.', STARTKEY => '', ENDKEY => ''} 2024-12-01T18:53:05,554 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:05,554 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-01T18:53:05,554 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:05,554 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:05,556 INFO [StoreOpener-816334c3ed5b276da72bbb808d9f822d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:05,558 INFO [StoreOpener-816334c3ed5b276da72bbb808d9f822d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 816334c3ed5b276da72bbb808d9f822d columnFamilyName cf 2024-12-01T18:53:05,558 DEBUG [StoreOpener-816334c3ed5b276da72bbb808d9f822d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-01T18:53:05,558 INFO [StoreOpener-816334c3ed5b276da72bbb808d9f822d-1 {}] regionserver.HStore(327): Store=816334c3ed5b276da72bbb808d9f822d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-01T18:53:05,559 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:05,560 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/default/TestHBaseWalOnEC/816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:05,560 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/default/TestHBaseWalOnEC/816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:05,561 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:05,561 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:05,562 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:05,565 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/default/TestHBaseWalOnEC/816334c3ed5b276da72bbb808d9f822d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-01T18:53:05,565 INFO [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 816334c3ed5b276da72bbb808d9f822d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74326974, jitterRate=0.1075582206249237}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-01T18:53:05,566 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:05,566 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 816334c3ed5b276da72bbb808d9f822d: Running coprocessor pre-open hook at 1733079185554Writing region info on filesystem at 1733079185554Initializing all the Stores at 1733079185556 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733079185556Cleaning up temporary data from old regions at 1733079185561 (+5 ms)Running coprocessor post-open hooks at 1733079185566 (+5 ms)Region opened successfully at 1733079185566 2024-12-01T18:53:05,568 INFO [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d., pid=6, masterSystemTime=1733079185547 2024-12-01T18:53:05,571 DEBUG [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 2024-12-01T18:53:05,571 INFO [RS_OPEN_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 2024-12-01T18:53:05,572 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=816334c3ed5b276da72bbb808d9f822d, regionState=OPEN, openSeqNum=2, regionLocation=9ec37ae3355e,36231,1733079184218 2024-12-01T18:53:05,576 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 816334c3ed5b276da72bbb808d9f822d, server=9ec37ae3355e,36231,1733079184218 because future has completed 2024-12-01T18:53:05,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-01T18:53:05,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 816334c3ed5b276da72bbb808d9f822d, server=9ec37ae3355e,36231,1733079184218 in 184 msec 2024-12-01T18:53:05,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-01T18:53:05,585 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=816334c3ed5b276da72bbb808d9f822d, ASSIGN in 347 msec 2024-12-01T18:53:05,586 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-01T18:53:05,587 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733079185586"}]},"ts":"1733079185586"} 2024-12-01T18:53:05,589 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-01T18:53:05,591 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): 
pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-01T18:53:05,595 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 406 msec 2024-12-01T18:53:05,769 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-01T18:53:05,775 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:53:05,821 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:53:05,822 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-01T18:53:05,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-01T18:53:05,823 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T18:53:05,823 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-01T18:53:05,823 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T18:53:05,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-01T18:53:05,827 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-01T18:53:05,827 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
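
Note: the wait reported by HBaseTestingUtil above is the standard way a test blocks until every region of a new table is assigned. A hedged sketch of the call is below; the exact overload used by TestHBaseWalOnEC is not visible in the log.

    // Sketch: block until all regions of the new table are assigned, as logged above.
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    public final class WaitForAssignmentSketch {
      static void await(HBaseTestingUtil util) throws Exception {
        // 60000 ms matches the timeout printed in the log.
        util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"), 60_000);
      }
    }
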
2024-12-01T18:53:05,830 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d., hostname=9ec37ae3355e,36231,1733079184218, seqNum=2] 2024-12-01T18:53:05,831 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-01T18:53:05,833 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58760, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-01T18:53:05,836 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-01T18:53:05,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-01T18:53:05,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T18:53:05,840 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-01T18:53:05,841 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-01T18:53:05,841 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-01T18:53:05,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T18:53:05,995 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36231 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-01T18:53:05,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 
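
Note: the location lookup for row='row' and the flush request logged above correspond to the usual put-then-flush sequence. A hedged reconstruction is sketched below; the value bytes are a placeholder, since the test's real payload is not visible in the log.

    // Sketch: write one cell at row/cf:cq and ask the master to flush the table,
    // matching the FlushTableProcedure logged above.
    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public final class PutAndFlushSketch {
      static void putAndFlush(Connection conn) throws IOException {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          admin.flush(name); // drives the region flush recorded next
        }
      }
    }
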
2024-12-01T18:53:05,996 INFO [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 816334c3ed5b276da72bbb808d9f822d 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-01T18:53:06,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/default/TestHBaseWalOnEC/816334c3ed5b276da72bbb808d9f822d/.tmp/cf/9d54f381ede442dcb0e9230906230420 is 36, key is row/cf:cq/1733079185833/Put/seqid=0 2024-12-01T18:53:06,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741839_1015 (size=4787) 2024-12-01T18:53:06,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741839_1015 (size=4787) 2024-12-01T18:53:06,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741839_1015 (size=4787) 2024-12-01T18:53:06,030 INFO [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/default/TestHBaseWalOnEC/816334c3ed5b276da72bbb808d9f822d/.tmp/cf/9d54f381ede442dcb0e9230906230420 2024-12-01T18:53:06,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/default/TestHBaseWalOnEC/816334c3ed5b276da72bbb808d9f822d/.tmp/cf/9d54f381ede442dcb0e9230906230420 as hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/default/TestHBaseWalOnEC/816334c3ed5b276da72bbb808d9f822d/cf/9d54f381ede442dcb0e9230906230420 2024-12-01T18:53:06,047 INFO [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/default/TestHBaseWalOnEC/816334c3ed5b276da72bbb808d9f822d/cf/9d54f381ede442dcb0e9230906230420, entries=1, sequenceid=5, filesize=4.7 K 2024-12-01T18:53:06,049 INFO [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 816334c3ed5b276da72bbb808d9f822d in 53ms, sequenceid=5, compaction requested=false 2024-12-01T18:53:06,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 816334c3ed5b276da72bbb808d9f822d: 2024-12-01T18:53:06,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 
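
Note: after the flush commits the .tmp file into the cf directory, the new store file is visible on the backing HDFS. A rough sketch of listing it follows; the region-specific path is copied from this run and changes on every execution, and `conf` is assumed to point at the same hdfs://localhost:36899 namenode.

    // Sketch: list the flushed store file under the region's cf directory on HDFS.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public final class ListStoreFilesSketch {
      static void list(Configuration conf) throws Exception {
        // Run-specific path taken from the log lines above.
        Path cfDir = new Path("hdfs://localhost:36899/user/jenkins/test-data/"
            + "362f2313-5737-c5c9-5254-66a6c67c19e2/data/default/TestHBaseWalOnEC/"
            + "816334c3ed5b276da72bbb808d9f822d/cf");
        FileSystem fs = cfDir.getFileSystem(conf);
        for (FileStatus status : fs.listStatus(cfDir)) {
          System.out.println(status.getPath().getName() + ": " + status.getLen() + " bytes");
        }
      }
    }
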
2024-12-01T18:53:06,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/9ec37ae3355e:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-01T18:53:06,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-01T18:53:06,056 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-01T18:53:06,056 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 211 msec 2024-12-01T18:53:06,060 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 221 msec 2024-12-01T18:53:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41027 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-01T18:53:06,152 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-01T18:53:06,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-01T18:53:06,157 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T18:53:06,157 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T18:53:06,157 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:06,157 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:06,157 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-01T18:53:06,157 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-01T18:53:06,157 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1198894410, stopped=false 2024-12-01T18:53:06,158 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=9ec37ae3355e,41027,1733079184138 2024-12-01T18:53:06,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:53:06,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:53:06,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:53:06,160 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-01T18:53:06,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:06,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, 
quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:06,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:06,161 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:06,161 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T18:53:06,161 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-01T18:53:06,161 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:06,161 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T18:53:06,162 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:06,162 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:06,162 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:06,162 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9ec37ae3355e,33297,1733079184189' ***** 2024-12-01T18:53:06,162 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T18:53:06,162 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9ec37ae3355e,36231,1733079184218' ***** 2024-12-01T18:53:06,162 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T18:53:06,162 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '9ec37ae3355e,45713,1733079184247' ***** 2024-12-01T18:53:06,162 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-01T18:53:06,162 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-01T18:53:06,162 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:53:06,162 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:53:06,162 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T18:53:06,162 INFO [RS:0;9ec37ae3355e:33297 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T18:53:06,162 INFO [RS:0;9ec37ae3355e:33297 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-01T18:53:06,162 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-01T18:53:06,162 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T18:53:06,163 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer(959): stopping server 9ec37ae3355e,33297,1733079184189 2024-12-01T18:53:06,163 INFO [RS:1;9ec37ae3355e:36231 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T18:53:06,163 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T18:53:06,163 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-01T18:53:06,163 INFO [RS:2;9ec37ae3355e:45713 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-01T18:53:06,163 INFO [RS:1;9ec37ae3355e:36231 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T18:53:06,163 INFO [RS:2;9ec37ae3355e:45713 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-01T18:53:06,163 INFO [RS:0;9ec37ae3355e:33297 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;9ec37ae3355e:33297. 2024-12-01T18:53:06,163 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(3091): Received CLOSE for 816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:06,163 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(959): stopping server 9ec37ae3355e,45713,1733079184247 2024-12-01T18:53:06,163 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T18:53:06,164 DEBUG [RS:0;9ec37ae3355e:33297 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T18:53:06,164 INFO [RS:2;9ec37ae3355e:45713 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;9ec37ae3355e:45713. 
2024-12-01T18:53:06,164 DEBUG [RS:0;9ec37ae3355e:33297 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:06,164 DEBUG [RS:2;9ec37ae3355e:45713 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T18:53:06,164 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(959): stopping server 9ec37ae3355e,36231,1733079184218 2024-12-01T18:53:06,164 DEBUG [RS:2;9ec37ae3355e:45713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:06,164 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer(976): stopping server 9ec37ae3355e,33297,1733079184189; all regions closed. 2024-12-01T18:53:06,164 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T18:53:06,164 INFO [RS:1;9ec37ae3355e:36231 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;9ec37ae3355e:36231. 2024-12-01T18:53:06,164 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 816334c3ed5b276da72bbb808d9f822d, disabling compactions & flushes 2024-12-01T18:53:06,164 INFO [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 2024-12-01T18:53:06,192 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 2024-12-01T18:53:06,192 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. after waiting 0 ms 2024-12-01T18:53:06,192 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:53:06,192 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 
2024-12-01T18:53:06,192 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:53:06,192 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T18:53:06,192 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-01T18:53:06,164 DEBUG [RS:1;9ec37ae3355e:36231 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-01T18:53:06,192 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,192 DEBUG [RS:1;9ec37ae3355e:36231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:06,192 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,192 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-01T18:53:06,192 DEBUG [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(1325): Online Regions={816334c3ed5b276da72bbb808d9f822d=TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d.} 2024-12-01T18:53:06,193 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,193 DEBUG [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(1351): Waiting on 816334c3ed5b276da72bbb808d9f822d 2024-12-01T18:53:06,193 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,193 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,194 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-01T18:53:06,194 DEBUG [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-01T18:53:06,194 DEBUG [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-01T18:53:06,195 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-01T18:53:06,195 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 
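The three identical AsyncConnectionImpl.close() call stacks above (logged by RS:0, RS:2 and RS:1) all originate from HBaseServerBase.closeClusterConnection() during region-server shutdown. For comparison, the client-side equivalent of that close is sketched below; the configuration, table name and row key are placeholders, and the sketch is illustrative rather than taken from the test — only the public ConnectionFactory/AsyncConnection API is relied on.

// Hedged sketch: closing an HBase async connection from client code, ending in
// the same AsyncConnection.close() call the region servers invoke via
// HBaseServerBase.closeClusterConnection() in the stacks above.
// Placeholders/assumptions: default configuration, table "TestHBaseWalOnEC", row "row0".
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;

public class AsyncConnectionCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // try-with-resources ends with AsyncConnection.close(), the method whose
    // call stack is being logged during the shutdown above.
    try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
      Result r = conn.getTable(TableName.valueOf("TestHBaseWalOnEC"))
                     .get(new Get(Bytes.toBytes("row0")))
                     .get(); // block until the async RPC completes
      System.out.println("row exists: " + !r.isEmpty());
    }
  }
}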
2024-12-01T18:53:06,195 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-01T18:53:06,195 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-01T18:53:06,195 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-01T18:53:06,195 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-01T18:53:06,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741835_1011 (size=93) 2024-12-01T18:53:06,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741835_1011 (size=93) 2024-12-01T18:53:06,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741835_1011 (size=93) 2024-12-01T18:53:06,200 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/default/TestHBaseWalOnEC/816334c3ed5b276da72bbb808d9f822d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-01T18:53:06,201 DEBUG [RS:0;9ec37ae3355e:33297 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/oldWALs 2024-12-01T18:53:06,201 INFO [RS:0;9ec37ae3355e:33297 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9ec37ae3355e%2C33297%2C1733079184189:(num 1733079184778) 2024-12-01T18:53:06,201 DEBUG [RS:0;9ec37ae3355e:33297 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:06,201 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:06,201 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T18:53:06,202 INFO [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 
2024-12-01T18:53:06,202 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.ChoreService(370): Chore service for: regionserver/9ec37ae3355e:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-01T18:53:06,202 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 816334c3ed5b276da72bbb808d9f822d: Waiting for close lock at 1733079186164Running coprocessor pre-close hooks at 1733079186164Disabling compacts and flushes for region at 1733079186164Disabling writes for close at 1733079186192 (+28 ms)Writing region close event to WAL at 1733079186195 (+3 ms)Running coprocessor post-close hooks at 1733079186201 (+6 ms)Closed at 1733079186202 (+1 ms) 2024-12-01T18:53:06,202 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:53:06,202 INFO [regionserver/9ec37ae3355e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T18:53:06,202 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:53:06,202 DEBUG [RS_CLOSE_REGION-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d. 2024-12-01T18:53:06,202 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T18:53:06,202 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T18:53:06,202 INFO [RS:0;9ec37ae3355e:33297 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33297 2024-12-01T18:53:06,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9ec37ae3355e,33297,1733079184189 2024-12-01T18:53:06,205 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:53:06,205 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T18:53:06,206 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9ec37ae3355e,33297,1733079184189] 2024-12-01T18:53:06,207 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9ec37ae3355e,33297,1733079184189 already deleted, retry=false 2024-12-01T18:53:06,207 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9ec37ae3355e,33297,1733079184189 expired; onlineServers=2 2024-12-01T18:53:06,218 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/.tmp/info/01c4f94d24554576af05c3fa44c1c0b7 is 153, key is TestHBaseWalOnEC,,1733079185184.816334c3ed5b276da72bbb808d9f822d./info:regioninfo/1733079185572/Put/seqid=0 2024-12-01T18:53:06,224 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741840_1016 (size=6637) 2024-12-01T18:53:06,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741840_1016 (size=6637) 2024-12-01T18:53:06,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741840_1016 (size=6637) 2024-12-01T18:53:06,228 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/.tmp/info/01c4f94d24554576af05c3fa44c1c0b7 2024-12-01T18:53:06,229 INFO [regionserver/9ec37ae3355e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:06,233 INFO [regionserver/9ec37ae3355e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:06,233 INFO [regionserver/9ec37ae3355e:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:06,250 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/.tmp/ns/cee3c7fb4dcb4bbbaaa848a28014e6c5 is 43, key is default/ns:d/1733079185153/Put/seqid=0 2024-12-01T18:53:06,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741841_1017 (size=5153) 2024-12-01T18:53:06,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741841_1017 (size=5153) 2024-12-01T18:53:06,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741841_1017 (size=5153) 2024-12-01T18:53:06,257 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/.tmp/ns/cee3c7fb4dcb4bbbaaa848a28014e6c5 2024-12-01T18:53:06,280 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/.tmp/table/857e7fb1f6c949af999479869477e52f is 52, key is TestHBaseWalOnEC/table:state/1733079185586/Put/seqid=0 2024-12-01T18:53:06,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741842_1018 (size=5249) 2024-12-01T18:53:06,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741842_1018 (size=5249) 2024-12-01T18:53:06,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741842_1018 (size=5249) 2024-12-01T18:53:06,288 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/.tmp/table/857e7fb1f6c949af999479869477e52f 2024-12-01T18:53:06,295 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/.tmp/info/01c4f94d24554576af05c3fa44c1c0b7 as hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/info/01c4f94d24554576af05c3fa44c1c0b7 2024-12-01T18:53:06,304 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/info/01c4f94d24554576af05c3fa44c1c0b7, entries=10, sequenceid=11, filesize=6.5 K 2024-12-01T18:53:06,305 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/.tmp/ns/cee3c7fb4dcb4bbbaaa848a28014e6c5 as hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/ns/cee3c7fb4dcb4bbbaaa848a28014e6c5 2024-12-01T18:53:06,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:06,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33297-0x1016f5f9d7f0001, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:06,306 INFO [RS:0;9ec37ae3355e:33297 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T18:53:06,306 INFO [RS:0;9ec37ae3355e:33297 {}] regionserver.HRegionServer(1031): Exiting; stopping=9ec37ae3355e,33297,1733079184189; zookeeper connection closed. 
2024-12-01T18:53:06,307 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@97473a1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@97473a1 2024-12-01T18:53:06,313 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/ns/cee3c7fb4dcb4bbbaaa848a28014e6c5, entries=2, sequenceid=11, filesize=5.0 K 2024-12-01T18:53:06,314 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/.tmp/table/857e7fb1f6c949af999479869477e52f as hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/table/857e7fb1f6c949af999479869477e52f 2024-12-01T18:53:06,322 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/table/857e7fb1f6c949af999479869477e52f, entries=2, sequenceid=11, filesize=5.1 K 2024-12-01T18:53:06,324 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false 2024-12-01T18:53:06,331 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-01T18:53:06,331 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-01T18:53:06,331 INFO [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-01T18:53:06,332 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733079186195Running coprocessor pre-close hooks at 1733079186195Disabling compacts and flushes for region at 1733079186195Disabling writes for close at 1733079186195Obtaining lock to block concurrent updates at 1733079186195Preparing flush snapshotting stores in 1588230740 at 1733079186195Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733079186196 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733079186197 (+1 ms)Flushing 1588230740/info: creating writer at 1733079186197Flushing 1588230740/info: appending metadata at 1733079186217 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733079186217Flushing 1588230740/ns: creating writer at 1733079186235 (+18 ms)Flushing 1588230740/ns: appending metadata at 1733079186249 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733079186249Flushing 1588230740/table: creating writer at 1733079186265 (+16 ms)Flushing 
1588230740/table: appending metadata at 1733079186279 (+14 ms)Flushing 1588230740/table: closing flushed file at 1733079186279Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2453ee01: reopening flushed file at 1733079186294 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bc460b7: reopening flushed file at 1733079186304 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72d0b192: reopening flushed file at 1733079186313 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 129ms, sequenceid=11, compaction requested=false at 1733079186324 (+11 ms)Writing region close event to WAL at 1733079186326 (+2 ms)Running coprocessor post-close hooks at 1733079186331 (+5 ms)Closed at 1733079186331 2024-12-01T18:53:06,332 DEBUG [RS_CLOSE_META-regionserver/9ec37ae3355e:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-01T18:53:06,393 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(976): stopping server 9ec37ae3355e,36231,1733079184218; all regions closed. 2024-12-01T18:53:06,393 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,394 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,394 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,394 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,394 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,395 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(976): stopping server 9ec37ae3355e,45713,1733079184247; all regions closed. 2024-12-01T18:53:06,395 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,395 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,395 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,395 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,396 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741833_1009 (size=1298) 2024-12-01T18:53:06,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741833_1009 (size=1298) 2024-12-01T18:53:06,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741833_1009 (size=1298) 2024-12-01T18:53:06,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741836_1012 (size=2751) 2024-12-01T18:53:06,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741836_1012 (size=2751) 2024-12-01T18:53:06,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741836_1012 (size=2751) 2024-12-01T18:53:06,402 DEBUG [RS:1;9ec37ae3355e:36231 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/oldWALs 2024-12-01T18:53:06,402 INFO [RS:1;9ec37ae3355e:36231 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9ec37ae3355e%2C36231%2C1733079184218:(num 1733079184764) 2024-12-01T18:53:06,402 DEBUG 
[RS:1;9ec37ae3355e:36231 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:06,403 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:06,403 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T18:53:06,403 DEBUG [RS:2;9ec37ae3355e:45713 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/oldWALs 2024-12-01T18:53:06,403 INFO [RS:2;9ec37ae3355e:45713 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9ec37ae3355e%2C45713%2C1733079184247.meta:.meta(num 1733079185102) 2024-12-01T18:53:06,403 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.ChoreService(370): Chore service for: regionserver/9ec37ae3355e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T18:53:06,403 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-01T18:53:06,403 INFO [regionserver/9ec37ae3355e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-01T18:53:06,403 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-01T18:53:06,403 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-01T18:53:06,403 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T18:53:06,403 INFO [RS:1;9ec37ae3355e:36231 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36231 2024-12-01T18:53:06,404 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,404 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,404 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,404 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,405 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:53:06,407 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9ec37ae3355e,36231,1733079184218 2024-12-01T18:53:06,407 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T18:53:06,408 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9ec37ae3355e,36231,1733079184218] 2024-12-01T18:53:06,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741834_1010 (size=93) 2024-12-01T18:53:06,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741834_1010 (size=93) 2024-12-01T18:53:06,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741834_1010 (size=93) 
2024-12-01T18:53:06,410 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9ec37ae3355e,36231,1733079184218 already deleted, retry=false 2024-12-01T18:53:06,410 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9ec37ae3355e,36231,1733079184218 expired; onlineServers=1 2024-12-01T18:53:06,509 INFO [RS:1;9ec37ae3355e:36231 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T18:53:06,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:06,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36231-0x1016f5f9d7f0002, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:06,509 INFO [RS:1;9ec37ae3355e:36231 {}] regionserver.HRegionServer(1031): Exiting; stopping=9ec37ae3355e,36231,1733079184218; zookeeper connection closed. 2024-12-01T18:53:06,510 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4f9c2b77 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4f9c2b77 2024-12-01T18:53:06,683 INFO [regionserver/9ec37ae3355e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-01T18:53:06,683 INFO [regionserver/9ec37ae3355e:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-01T18:53:06,811 DEBUG [RS:2;9ec37ae3355e:45713 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/oldWALs 2024-12-01T18:53:06,811 INFO [RS:2;9ec37ae3355e:45713 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 9ec37ae3355e%2C45713%2C1733079184247:(num 1733079184774) 2024-12-01T18:53:06,811 DEBUG [RS:2;9ec37ae3355e:45713 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-01T18:53:06,811 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.LeaseManager(133): Closed leases 2024-12-01T18:53:06,812 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T18:53:06,812 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.ChoreService(370): Chore service for: regionserver/9ec37ae3355e:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-01T18:53:06,812 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T18:53:06,812 INFO [regionserver/9ec37ae3355e:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-01T18:53:06,812 INFO [RS:2;9ec37ae3355e:45713 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45713 2024-12-01T18:53:06,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/9ec37ae3355e,45713,1733079184247 2024-12-01T18:53:06,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-01T18:53:06,814 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T18:53:06,817 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [9ec37ae3355e,45713,1733079184247] 2024-12-01T18:53:06,818 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/9ec37ae3355e,45713,1733079184247 already deleted, retry=false 2024-12-01T18:53:06,818 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 9ec37ae3355e,45713,1733079184247 expired; onlineServers=0 2024-12-01T18:53:06,818 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '9ec37ae3355e,41027,1733079184138' ***** 2024-12-01T18:53:06,818 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-01T18:53:06,818 INFO [M:0;9ec37ae3355e:41027 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-01T18:53:06,818 INFO [M:0;9ec37ae3355e:41027 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-01T18:53:06,818 DEBUG [M:0;9ec37ae3355e:41027 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-01T18:53:06,818 DEBUG [M:0;9ec37ae3355e:41027 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-01T18:53:06,818 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-01T18:53:06,818 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.small.0-1733079184478 {}] cleaner.HFileCleaner(306): Exit Thread[master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.small.0-1733079184478,5,FailOnTimeoutGroup] 2024-12-01T18:53:06,818 DEBUG [master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.large.0-1733079184478 {}] cleaner.HFileCleaner(306): Exit Thread[master/9ec37ae3355e:0:becomeActiveMaster-HFileCleaner.large.0-1733079184478,5,FailOnTimeoutGroup] 2024-12-01T18:53:06,818 INFO [M:0;9ec37ae3355e:41027 {}] hbase.ChoreService(370): Chore service for: master/9ec37ae3355e:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-01T18:53:06,819 INFO [M:0;9ec37ae3355e:41027 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-01T18:53:06,819 DEBUG [M:0;9ec37ae3355e:41027 {}] master.HMaster(1795): Stopping service threads 2024-12-01T18:53:06,819 INFO [M:0;9ec37ae3355e:41027 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-01T18:53:06,819 INFO [M:0;9ec37ae3355e:41027 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-01T18:53:06,819 INFO [M:0;9ec37ae3355e:41027 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-01T18:53:06,819 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-01T18:53:06,819 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-01T18:53:06,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-01T18:53:06,820 DEBUG [M:0;9ec37ae3355e:41027 {}] zookeeper.ZKUtil(347): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-01T18:53:06,820 WARN [M:0;9ec37ae3355e:41027 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-01T18:53:06,820 INFO [M:0;9ec37ae3355e:41027 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/.lastflushedseqids 2024-12-01T18:53:06,822 WARN [IPC Server handler 4 on default port 36899 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-01T18:53:06,822 WARN [IPC Server handler 4 on default port 36899 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-01T18:53:06,823 WARN [IPC Server handler 4 on default 
port 36899 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-01T18:53:06,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741843_1019 (size=127) 2024-12-01T18:53:06,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741843_1019 (size=127) 2024-12-01T18:53:06,829 INFO [M:0;9ec37ae3355e:41027 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-01T18:53:06,829 INFO [M:0;9ec37ae3355e:41027 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-01T18:53:06,829 DEBUG [M:0;9ec37ae3355e:41027 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-01T18:53:06,829 INFO [M:0;9ec37ae3355e:41027 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:53:06,829 DEBUG [M:0;9ec37ae3355e:41027 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:53:06,829 DEBUG [M:0;9ec37ae3355e:41027 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-01T18:53:06,829 DEBUG [M:0;9ec37ae3355e:41027 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-01T18:53:06,829 INFO [M:0;9ec37ae3355e:41027 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.85 KB heapSize=34.13 KB 2024-12-01T18:53:06,847 DEBUG [M:0;9ec37ae3355e:41027 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/828afb8e407642b9913db9e4991350cf is 82, key is hbase:meta,,1/info:regioninfo/1733079185135/Put/seqid=0 2024-12-01T18:53:06,848 WARN [IPC Server handler 3 on default port 36899 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-01T18:53:06,848 WARN [IPC Server handler 3 on default port 36899 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-01T18:53:06,848 WARN [IPC Server handler 3 on default port 36899 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-01T18:53:06,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741844_1020 (size=5672) 2024-12-01T18:53:06,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741844_1020 (size=5672) 2024-12-01T18:53:06,854 INFO [M:0;9ec37ae3355e:41027 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/828afb8e407642b9913db9e4991350cf 2024-12-01T18:53:06,875 DEBUG [M:0;9ec37ae3355e:41027 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00b5b9071a97474c830a72cad34b2fbd is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733079185593/Put/seqid=0 2024-12-01T18:53:06,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741845_1021 (size=6441) 2024-12-01T18:53:06,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741845_1021 (size=6441) 2024-12-01T18:53:06,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 
is added to blk_1073741845_1021 (size=6441) 2024-12-01T18:53:06,882 INFO [M:0;9ec37ae3355e:41027 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.17 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00b5b9071a97474c830a72cad34b2fbd 2024-12-01T18:53:06,902 DEBUG [M:0;9ec37ae3355e:41027 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1aba47e9fac04815833d195fc763d6c1 is 69, key is 9ec37ae3355e,33297,1733079184189/rs:state/1733079184601/Put/seqid=0 2024-12-01T18:53:06,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741846_1022 (size=5294) 2024-12-01T18:53:06,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741846_1022 (size=5294) 2024-12-01T18:53:06,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741846_1022 (size=5294) 2024-12-01T18:53:06,909 INFO [M:0;9ec37ae3355e:41027 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1aba47e9fac04815833d195fc763d6c1 2024-12-01T18:53:06,917 DEBUG [M:0;9ec37ae3355e:41027 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/828afb8e407642b9913db9e4991350cf as hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/828afb8e407642b9913db9e4991350cf 2024-12-01T18:53:06,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:06,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45713-0x1016f5f9d7f0003, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:06,917 INFO [RS:2;9ec37ae3355e:45713 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T18:53:06,917 INFO [RS:2;9ec37ae3355e:45713 {}] regionserver.HRegionServer(1031): Exiting; stopping=9ec37ae3355e,45713,1733079184247; zookeeper connection closed. 
2024-12-01T18:53:06,917 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@84f6f4e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@84f6f4e 2024-12-01T18:53:06,918 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-01T18:53:06,924 INFO [M:0;9ec37ae3355e:41027 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/828afb8e407642b9913db9e4991350cf, entries=8, sequenceid=72, filesize=5.5 K 2024-12-01T18:53:06,925 DEBUG [M:0;9ec37ae3355e:41027 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/00b5b9071a97474c830a72cad34b2fbd as hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/00b5b9071a97474c830a72cad34b2fbd 2024-12-01T18:53:06,931 INFO [M:0;9ec37ae3355e:41027 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/00b5b9071a97474c830a72cad34b2fbd, entries=8, sequenceid=72, filesize=6.3 K 2024-12-01T18:53:06,932 DEBUG [M:0;9ec37ae3355e:41027 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/1aba47e9fac04815833d195fc763d6c1 as hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1aba47e9fac04815833d195fc763d6c1 2024-12-01T18:53:06,938 INFO [M:0;9ec37ae3355e:41027 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36899/user/jenkins/test-data/362f2313-5737-c5c9-5254-66a6c67c19e2/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/1aba47e9fac04815833d195fc763d6c1, entries=3, sequenceid=72, filesize=5.2 K 2024-12-01T18:53:06,940 INFO [M:0;9ec37ae3355e:41027 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=72, compaction requested=false 2024-12-01T18:53:06,941 INFO [M:0;9ec37ae3355e:41027 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-01T18:53:06,941 DEBUG [M:0;9ec37ae3355e:41027 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733079186829Disabling compacts and flushes for region at 1733079186829Disabling writes for close at 1733079186829Obtaining lock to block concurrent updates at 1733079186829Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733079186829Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27492, getHeapSize=34888, getOffHeapSize=0, getCellsCount=85 at 1733079186830 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1733079186831 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733079186831Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733079186846 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733079186846Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733079186860 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733079186874 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733079186874Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733079186888 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733079186901 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733079186902 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65dffce: reopening flushed file at 1733079186916 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@64c802e1: reopening flushed file at 1733079186924 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56f1dc17: reopening flushed file at 1733079186931 (+7 ms)Finished flush of dataSize ~26.85 KB/27492, heapSize ~33.84 KB/34648, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=72, compaction requested=false at 1733079186940 (+9 ms)Writing region close event to WAL at 1733079186941 (+1 ms)Closed at 1733079186941 2024-12-01T18:53:06,942 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,942 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,942 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,942 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,942 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-01T18:53:06,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34415 is added to blk_1073741830_1006 (size=32695) 2024-12-01T18:53:06,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42613 is added to blk_1073741830_1006 (size=32695) 2024-12-01T18:53:06,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35625 is added to blk_1073741830_1006 (size=32695) 2024-12-01T18:53:06,946 INFO [M:0;9ec37ae3355e:41027 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-01T18:53:06,946 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-01T18:53:06,946 INFO [M:0;9ec37ae3355e:41027 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41027 2024-12-01T18:53:06,946 INFO [M:0;9ec37ae3355e:41027 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-01T18:53:07,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:07,048 INFO [M:0;9ec37ae3355e:41027 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-01T18:53:07,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41027-0x1016f5f9d7f0000, quorum=127.0.0.1:61333, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-01T18:53:07,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4748a603{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-01T18:53:07,051 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2da73ce7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-01T18:53:07,051 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-01T18:53:07,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23ef21e8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-01T18:53:07,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7dddaf9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/hadoop.log.dir/,STOPPED} 2024-12-01T18:53:07,053 WARN [BP-226986105-172.17.0.2-1733079183114 heartbeating to localhost/127.0.0.1:36899 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-01T18:53:07,053 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-01T18:53:07,053 WARN [BP-226986105-172.17.0.2-1733079183114 heartbeating to localhost/127.0.0.1:36899 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-226986105-172.17.0.2-1733079183114 (Datanode Uuid 7d3ec84b-fccb-4455-bd38-2c5c9d2834e7) service to localhost/127.0.0.1:36899
2024-12-01T18:53:07,053 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T18:53:07,053 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data5/current/BP-226986105-172.17.0.2-1733079183114 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:53:07,054 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data6/current/BP-226986105-172.17.0.2-1733079183114 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:53:07,054 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T18:53:07,057 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6fede0c8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T18:53:07,057 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@10113c37{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T18:53:07,057 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T18:53:07,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77da8076{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T18:53:07,058 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b209404{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/hadoop.log.dir/,STOPPED}
2024-12-01T18:53:07,059 WARN [BP-226986105-172.17.0.2-1733079183114 heartbeating to localhost/127.0.0.1:36899 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T18:53:07,059 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T18:53:07,059 WARN [BP-226986105-172.17.0.2-1733079183114 heartbeating to localhost/127.0.0.1:36899 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-226986105-172.17.0.2-1733079183114 (Datanode Uuid e06a0f37-e470-4de9-9120-e9381a88e12d) service to localhost/127.0.0.1:36899
2024-12-01T18:53:07,059 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T18:53:07,059 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data3/current/BP-226986105-172.17.0.2-1733079183114 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:53:07,060 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data4/current/BP-226986105-172.17.0.2-1733079183114 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:53:07,060 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T18:53:07,062 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@534992c5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-01T18:53:07,062 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@67e65606{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T18:53:07,062 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T18:53:07,062 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@585f6017{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T18:53:07,063 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@95e5bc7{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/hadoop.log.dir/,STOPPED}
2024-12-01T18:53:07,063 WARN [BP-226986105-172.17.0.2-1733079183114 heartbeating to localhost/127.0.0.1:36899 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-01T18:53:07,063 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-01T18:53:07,064 WARN [BP-226986105-172.17.0.2-1733079183114 heartbeating to localhost/127.0.0.1:36899 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-226986105-172.17.0.2-1733079183114 (Datanode Uuid fbb4068c-d3c3-435f-aef3-b92db5dc2b10) service to localhost/127.0.0.1:36899
2024-12-01T18:53:07,064 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-01T18:53:07,064 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data1/current/BP-226986105-172.17.0.2-1733079183114 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:53:07,064 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/cluster_27e7bc05-d2b1-55ad-568f-b0915fc5e322/data/data2/current/BP-226986105-172.17.0.2-1733079183114 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-01T18:53:07,065 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-01T18:53:07,070 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a29c5ea{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-01T18:53:07,071 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@30b20840{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-01T18:53:07,071 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-01T18:53:07,071 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a78787c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-01T18:53:07,071 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@44402c27{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/723978fc-faf3-a741-0f5d-a0af22339cef/hadoop.log.dir/,STOPPED}
2024-12-01T18:53:07,078 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-01T18:53:07,109 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-01T18:53:07,117 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=153 (was 93) - Thread LEAK? -, OpenFileDescriptor=521 (was 453) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=133 (was 145), ProcessCount=11 (was 11), AvailableMemoryMB=4091 (was 4321)