2024-11-09 18:51:43,305 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-09 18:51:43,321 main DEBUG Took 0.012246 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-09 18:51:43,321 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-09 18:51:43,322 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-09 18:51:43,323 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-09 18:51:43,324 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,341 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-09 18:51:43,356 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,358 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,359 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,359 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,360 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,360 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,361 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,361 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,362 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,362 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,364 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,364 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,365 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,365 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-09 18:51:43,366 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,366 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,367 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,367 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,368 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,369 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,369 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,370 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,370 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,371 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-09 18:51:43,372 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,372 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-09 18:51:43,374 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-09 18:51:43,376 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-09 18:51:43,378 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-09 18:51:43,379 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-09 18:51:43,381 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-09 18:51:43,381 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-09 18:51:43,392 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-09 18:51:43,395 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-09 18:51:43,397 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-09 18:51:43,397 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-09 18:51:43,398 main DEBUG createAppenders(={Console}) 2024-11-09 18:51:43,399 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-09 18:51:43,399 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-09 18:51:43,400 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-09 18:51:43,400 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-09 18:51:43,401 main DEBUG OutputStream closed 2024-11-09 18:51:43,401 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-09 18:51:43,401 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-09 18:51:43,402 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-09 18:51:43,485 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-09 18:51:43,487 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-09 18:51:43,489 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-09 18:51:43,490 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-09 18:51:43,490 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-09 18:51:43,491 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-09 18:51:43,491 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-09 18:51:43,492 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-09 18:51:43,492 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-09 18:51:43,493 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-09 18:51:43,493 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-09 18:51:43,493 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-09 18:51:43,494 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-09 18:51:43,494 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-09 18:51:43,495 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-09 18:51:43,495 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-09 18:51:43,495 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-09 18:51:43,496 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-09 18:51:43,498 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-09 18:51:43,498 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-09 18:51:43,499 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-09 18:51:43,500 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-09T18:51:43,515 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-09 18:51:43,518 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-09 18:51:43,518 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
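For reference, the per-package levels built above come from the test's log4j2.properties. A minimal, hypothetical Java sketch of the same handful of levels set through the public Log4j2 Configurator API (the class name TestLogLevels is illustrative and not part of the test; log4j-api and log4j-core 2.x are assumed on the classpath):

    import org.apache.logging.log4j.Level;
    import org.apache.logging.log4j.core.config.Configurator;

    public class TestLogLevels {
        public static void main(String[] args) {
            // Mirror the root level and a few of the per-package levels shown in the builders above.
            Configurator.setRootLevel(Level.INFO);
            Configurator.setLevel("org.apache.hadoop", Level.WARN);
            Configurator.setLevel("org.apache.hadoop.hbase", Level.DEBUG);
            Configurator.setLevel("org.apache.zookeeper", Level.ERROR);
            Configurator.setLevel("org.apache.hadoop.metrics2.impl.MetricsSystemImpl", Level.ERROR);
        }
    }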
2024-11-09T18:51:43,809 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4 2024-11-09T18:51:43,843 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2, deleteOnExit=true 2024-11-09T18:51:43,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/test.cache.data in system properties and HBase conf 2024-11-09T18:51:43,845 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.tmp.dir in system properties and HBase conf 2024-11-09T18:51:43,846 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.log.dir in system properties and HBase conf 2024-11-09T18:51:43,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-09T18:51:43,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-09T18:51:43,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-09T18:51:43,948 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-09T18:51:44,088 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-09T18:51:44,093 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-09T18:51:44,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-09T18:51:44,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-09T18:51:44,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T18:51:44,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-09T18:51:44,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-09T18:51:44,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T18:51:44,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T18:51:44,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-09T18:51:44,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/nfs.dump.dir in system properties and HBase conf 2024-11-09T18:51:44,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/java.io.tmpdir in system properties and HBase conf 2024-11-09T18:51:44,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T18:51:44,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-09T18:51:44,102 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-09T18:51:45,403 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-09T18:51:45,475 INFO [Time-limited test {}] log.Log(170): Logging initialized @3006ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-09T18:51:45,557 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T18:51:45,639 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T18:51:45,666 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T18:51:45,666 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T18:51:45,669 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T18:51:45,693 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T18:51:45,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@346b353e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.log.dir/,AVAILABLE} 2024-11-09T18:51:45,699 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2566da3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T18:51:45,929 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44270346{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/java.io.tmpdir/jetty-localhost-39637-hadoop-hdfs-3_4_1-tests_jar-_-any-11466755846350120054/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T18:51:45,942 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11292817{HTTP/1.1, (http/1.1)}{localhost:39637} 2024-11-09T18:51:45,942 INFO [Time-limited test {}] server.Server(415): Started @3474ms 2024-11-09T18:51:47,085 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T18:51:47,094 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T18:51:47,095 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T18:51:47,095 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T18:51:47,096 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T18:51:47,096 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a55babc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.log.dir/,AVAILABLE} 2024-11-09T18:51:47,097 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e9e5394{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T18:51:47,212 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ec777b6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/java.io.tmpdir/jetty-localhost-41099-hadoop-hdfs-3_4_1-tests_jar-_-any-3621091576482642612/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T18:51:47,213 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@145f251e{HTTP/1.1, (http/1.1)}{localhost:41099} 2024-11-09T18:51:47,214 INFO [Time-limited test {}] server.Server(415): Started @4745ms 2024-11-09T18:51:47,266 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T18:51:47,387 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T18:51:47,394 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T18:51:47,397 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T18:51:47,397 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T18:51:47,397 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T18:51:47,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56f2bf79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.log.dir/,AVAILABLE} 2024-11-09T18:51:47,399 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19093484{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T18:51:47,510 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1327a94d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/java.io.tmpdir/jetty-localhost-34203-hadoop-hdfs-3_4_1-tests_jar-_-any-4985566232925276698/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T18:51:47,511 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@674554fc{HTTP/1.1, (http/1.1)}{localhost:34203} 2024-11-09T18:51:47,511 INFO [Time-limited test {}] server.Server(415): Started @5042ms 2024-11-09T18:51:47,513 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T18:51:47,547 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T18:51:47,553 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T18:51:47,555 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T18:51:47,555 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T18:51:47,555 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T18:51:47,556 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1646e48a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.log.dir/,AVAILABLE} 2024-11-09T18:51:47,556 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3891561d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T18:51:47,687 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@554ba3d5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/java.io.tmpdir/jetty-localhost-39361-hadoop-hdfs-3_4_1-tests_jar-_-any-2544761685869213478/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T18:51:47,688 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64a37729{HTTP/1.1, (http/1.1)}{localhost:39361} 2024-11-09T18:51:47,688 INFO [Time-limited test {}] server.Server(415): Started @5219ms 2024-11-09T18:51:47,690 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
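For context, the namenode and the three datanode HTTP servers started above belong to an in-process HDFS mini cluster. A minimal sketch of standing one up with Hadoop's test-scope MiniDFSCluster, assuming the hadoop-hdfs test artifacts are on the classpath (the class name MiniHdfsSketch is illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniHdfsSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new HdfsConfiguration();
            // Three datanodes, matching the numDataNodes=3 option used by this test.
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
            try {
                cluster.waitActive();
                DistributedFileSystem fs = cluster.getFileSystem();
                System.out.println("NameNode URI: " + fs.getUri());
            } finally {
                cluster.shutdown();
            }
        }
    }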
2024-11-09T18:51:49,263 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data1/current/BP-1136675770-172.17.0.3-1731178304792/current, will proceed with Du for space computation calculation, 2024-11-09T18:51:49,263 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data3/current/BP-1136675770-172.17.0.3-1731178304792/current, will proceed with Du for space computation calculation, 2024-11-09T18:51:49,263 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data2/current/BP-1136675770-172.17.0.3-1731178304792/current, will proceed with Du for space computation calculation, 2024-11-09T18:51:49,263 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data4/current/BP-1136675770-172.17.0.3-1731178304792/current, will proceed with Du for space computation calculation, 2024-11-09T18:51:49,292 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T18:51:49,292 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T18:51:49,338 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa6839a162c70a0a7 with lease ID 0xe47df655d684d33d: Processing first storage report for DS-10d40756-7847-476c-8225-68f25a818c2c from datanode DatanodeRegistration(127.0.0.1:44911, datanodeUuid=4b63c7ba-9242-405d-b626-e72e5113f66b, infoPort=36463, infoSecurePort=0, ipcPort=37453, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792) 2024-11-09T18:51:49,340 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6839a162c70a0a7 with lease ID 0xe47df655d684d33d: from storage DS-10d40756-7847-476c-8225-68f25a818c2c node DatanodeRegistration(127.0.0.1:44911, datanodeUuid=4b63c7ba-9242-405d-b626-e72e5113f66b, infoPort=36463, infoSecurePort=0, ipcPort=37453, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T18:51:49,340 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5932a2a1999cda3 with lease ID 0xe47df655d684d33e: Processing first storage report for DS-70bdd7cc-485a-4b78-99e9-62cd5a1cbbf6 from datanode DatanodeRegistration(127.0.0.1:43411, datanodeUuid=a6ad5aa7-2e63-4e0b-9b22-816e51f14e8f, infoPort=41165, infoSecurePort=0, ipcPort=40497, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792) 2024-11-09T18:51:49,341 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf5932a2a1999cda3 with lease ID 0xe47df655d684d33e: from storage DS-70bdd7cc-485a-4b78-99e9-62cd5a1cbbf6 node DatanodeRegistration(127.0.0.1:43411, datanodeUuid=a6ad5aa7-2e63-4e0b-9b22-816e51f14e8f, infoPort=41165, infoSecurePort=0, ipcPort=40497, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T18:51:49,341 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa6839a162c70a0a7 with lease ID 0xe47df655d684d33d: Processing first storage report for DS-b19529ba-731f-40c9-8e67-4eea1c9c3604 from datanode DatanodeRegistration(127.0.0.1:44911, datanodeUuid=4b63c7ba-9242-405d-b626-e72e5113f66b, infoPort=36463, infoSecurePort=0, ipcPort=37453, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792) 2024-11-09T18:51:49,341 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa6839a162c70a0a7 with lease ID 0xe47df655d684d33d: from storage DS-b19529ba-731f-40c9-8e67-4eea1c9c3604 node DatanodeRegistration(127.0.0.1:44911, datanodeUuid=4b63c7ba-9242-405d-b626-e72e5113f66b, infoPort=36463, infoSecurePort=0, ipcPort=37453, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T18:51:49,342 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf5932a2a1999cda3 with lease ID 0xe47df655d684d33e: Processing first storage report for DS-693f81e7-838b-4c1f-9f65-74554b1765a7 from datanode DatanodeRegistration(127.0.0.1:43411, datanodeUuid=a6ad5aa7-2e63-4e0b-9b22-816e51f14e8f, infoPort=41165, infoSecurePort=0, ipcPort=40497, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792) 2024-11-09T18:51:49,342 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xf5932a2a1999cda3 with lease ID 0xe47df655d684d33e: from storage DS-693f81e7-838b-4c1f-9f65-74554b1765a7 node DatanodeRegistration(127.0.0.1:43411, datanodeUuid=a6ad5aa7-2e63-4e0b-9b22-816e51f14e8f, infoPort=41165, infoSecurePort=0, ipcPort=40497, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T18:51:49,481 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data6/current/BP-1136675770-172.17.0.3-1731178304792/current, will proceed with Du for space computation calculation, 2024-11-09T18:51:49,481 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data5/current/BP-1136675770-172.17.0.3-1731178304792/current, will proceed with Du for space computation calculation, 2024-11-09T18:51:49,498 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T18:51:49,503 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6a2a7e08141daf0f with lease ID 0xe47df655d684d33f: Processing first storage report for DS-7cda7003-85ec-4c0d-a642-3ac5b176cdc8 from datanode DatanodeRegistration(127.0.0.1:38899, datanodeUuid=e61bdbdf-bfb1-4797-92c3-bd42021f1e5a, infoPort=44077, infoSecurePort=0, ipcPort=44811, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792) 2024-11-09T18:51:49,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6a2a7e08141daf0f with lease ID 0xe47df655d684d33f: from storage DS-7cda7003-85ec-4c0d-a642-3ac5b176cdc8 node DatanodeRegistration(127.0.0.1:38899, datanodeUuid=e61bdbdf-bfb1-4797-92c3-bd42021f1e5a, infoPort=44077, infoSecurePort=0, ipcPort=44811, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T18:51:49,504 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6a2a7e08141daf0f with lease ID 0xe47df655d684d33f: Processing first storage report for DS-cdc2067f-e023-4adc-8cad-663616756892 from datanode DatanodeRegistration(127.0.0.1:38899, datanodeUuid=e61bdbdf-bfb1-4797-92c3-bd42021f1e5a, infoPort=44077, infoSecurePort=0, ipcPort=44811, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792) 2024-11-09T18:51:49,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6a2a7e08141daf0f with lease ID 0xe47df655d684d33f: from storage DS-cdc2067f-e023-4adc-8cad-663616756892 node DatanodeRegistration(127.0.0.1:38899, datanodeUuid=e61bdbdf-bfb1-4797-92c3-bd42021f1e5a, infoPort=44077, infoSecurePort=0, ipcPort=44811, storageInfo=lv=-57;cid=testClusterID;nsid=1140273568;c=1731178304792), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T18:51:49,526 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4 2024-11-09T18:51:49,594 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-11-09T18:51:49,651 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=160, OpenFileDescriptor=393, MaxFileDescriptor=1048576, SystemLoadAverage=151, ProcessCount=11, AvailableMemoryMB=5512 2024-11-09T18:51:49,652 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-09T18:51:49,659 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-09T18:51:49,727 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/zookeeper_0, clientPort=51638, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-09T18:51:49,737 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51638 2024-11-09T18:51:49,746 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:51:49,748 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:51:49,862 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:49,862 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T18:51:49,924 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:42456 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:43411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42456 dst: /127.0.0.1:43411
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-09T18:51:49,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-09T18:51:50,348 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
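For context, the parity-allocation warnings and the WRITE_BLOCK failure above are consistent with using the RS-3-2-1024k erasure coding policy (3 data + 2 parity blocks per stripe) on a cluster that has only 3 datanodes. A minimal, hypothetical sketch of applying that policy on a large-enough mini cluster, assuming the enable/setErasureCodingPolicy methods exposed by DistributedFileSystem in recent Hadoop 3 releases (class name EcPolicySketch, the 5-datanode count, and the /hbase/WALs path are illustrative choices, not taken from the test):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class EcPolicySketch {
        public static void main(String[] args) throws Exception {
            // RS-3-2 places 3 data + 2 parity blocks per stripe, so it wants at least 5 datanodes;
            // with only 3 datanodes the namenode logs the parity-allocation warnings seen above.
            MiniDFSCluster cluster =
                new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(5).build();
            try {
                cluster.waitActive();
                DistributedFileSystem dfs = cluster.getFileSystem();
                dfs.enableErasureCodingPolicy("RS-3-2-1024k");   // built-in policies other than the default must be enabled first
                Path walDir = new Path("/hbase/WALs");
                dfs.mkdirs(walDir);
                dfs.setErasureCodingPolicy(walDir, "RS-3-2-1024k");
                System.out.println(dfs.getErasureCodingPolicy(walDir));
            } finally {
                cluster.shutdown();
            }
        }
    }

On a real cluster the same check is the one the warning itself suggests: running 'hdfs ec -verifyClusterSetup' to confirm the topology can support the enabled policies.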
2024-11-09T18:51:50,355 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b with version=8 2024-11-09T18:51:50,356 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/hbase-staging 2024-11-09T18:51:50,439 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-09T18:51:50,651 INFO [Time-limited test {}] client.ConnectionUtils(128): master/fb97eb0edbe8:0 server-side Connection retries=45 2024-11-09T18:51:50,660 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:50,661 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:50,665 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T18:51:50,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:50,665 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T18:51:50,797 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-09T18:51:50,852 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-09T18:51:50,861 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-09T18:51:50,864 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T18:51:50,886 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 96835 (auto-detected) 2024-11-09T18:51:50,887 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-09T18:51:50,903 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45547 2024-11-09T18:51:50,923 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45547 connecting to ZooKeeper ensemble=127.0.0.1:51638 2024-11-09T18:51:51,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:455470x0, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T18:51:51,048 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45547-0x10120f0a3f90000 connected 2024-11-09T18:51:51,145 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:51:51,149 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:51:51,159 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:51:51,164 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b, hbase.cluster.distributed=false 2024-11-09T18:51:51,189 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T18:51:51,194 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45547 2024-11-09T18:51:51,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45547 2024-11-09T18:51:51,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45547 2024-11-09T18:51:51,197 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45547 2024-11-09T18:51:51,197 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45547 2024-11-09T18:51:51,304 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fb97eb0edbe8:0 server-side Connection retries=45 2024-11-09T18:51:51,306 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:51,306 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:51,306 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T18:51:51,307 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:51,307 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T18:51:51,311 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T18:51:51,314 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T18:51:51,322 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:43101 2024-11-09T18:51:51,324 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43101 connecting to ZooKeeper ensemble=127.0.0.1:51638 2024-11-09T18:51:51,325 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:51:51,331 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:51:51,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:431010x0, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T18:51:51,350 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:431010x0, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:51:51,350 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43101-0x10120f0a3f90001 connected 2024-11-09T18:51:51,355 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T18:51:51,365 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T18:51:51,368 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T18:51:51,375 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T18:51:51,376 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43101 2024-11-09T18:51:51,377 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43101 2024-11-09T18:51:51,378 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43101 2024-11-09T18:51:51,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43101 2024-11-09T18:51:51,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43101 2024-11-09T18:51:51,408 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fb97eb0edbe8:0 server-side Connection retries=45 2024-11-09T18:51:51,408 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:51,409 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:51,409 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T18:51:51,409 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:51,410 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T18:51:51,410 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T18:51:51,410 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T18:51:51,411 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:41163 2024-11-09T18:51:51,413 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41163 connecting to ZooKeeper ensemble=127.0.0.1:51638 2024-11-09T18:51:51,414 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:51:51,417 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:51:51,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:411630x0, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T18:51:51,434 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41163-0x10120f0a3f90002 connected 2024-11-09T18:51:51,434 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:51:51,434 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T18:51:51,435 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T18:51:51,437 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T18:51:51,439 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T18:51:51,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41163 2024-11-09T18:51:51,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41163 2024-11-09T18:51:51,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41163 2024-11-09T18:51:51,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41163 2024-11-09T18:51:51,443 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41163 2024-11-09T18:51:51,460 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fb97eb0edbe8:0 server-side Connection retries=45 2024-11-09T18:51:51,460 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:51,461 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:51,461 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T18:51:51,461 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:51:51,461 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T18:51:51,461 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T18:51:51,461 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T18:51:51,462 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44469 2024-11-09T18:51:51,464 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44469 connecting to ZooKeeper ensemble=127.0.0.1:51638 2024-11-09T18:51:51,466 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:51:51,468 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:51:51,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:444690x0, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T18:51:51,481 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:444690x0, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:51:51,481 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44469-0x10120f0a3f90003 connected 2024-11-09T18:51:51,482 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T18:51:51,483 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T18:51:51,484 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T18:51:51,486 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T18:51:51,487 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44469 2024-11-09T18:51:51,487 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44469 2024-11-09T18:51:51,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44469 2024-11-09T18:51:51,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44469 2024-11-09T18:51:51,490 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44469 2024-11-09T18:51:51,508 DEBUG [M:0;fb97eb0edbe8:45547 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;fb97eb0edbe8:45547 2024-11-09T18:51:51,509 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/fb97eb0edbe8,45547,1731178310487 2024-11-09T18:51:51,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:51:51,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:51:51,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:51:51,523 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:51:51,525 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/fb97eb0edbe8,45547,1731178310487 2024-11-09T18:51:51,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T18:51:51,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T18:51:51,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:51,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T18:51:51,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:51,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:51,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:51,560 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-09T18:51:51,562 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/fb97eb0edbe8,45547,1731178310487 from backup master directory 2024-11-09T18:51:51,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:51:51,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:51:51,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:51:51,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/fb97eb0edbe8,45547,1731178310487 2024-11-09T18:51:51,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:51:51,576 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-09T18:51:51,576 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=fb97eb0edbe8,45547,1731178310487 2024-11-09T18:51:51,578 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-09T18:51:51,579 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-09T18:51:51,636 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/hbase.id] with ID: 9dc5d6e1-e3ed-470f-b63b-31a779d1cc63 2024-11-09T18:51:51,636 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/.tmp/hbase.id 2024-11-09T18:51:51,643 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:51,644 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:51,648 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:49012 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:44911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49012 dst: /127.0.0.1:44911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
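[Editor's note] The warnings and the DataXceiver stack trace above record one underlying condition: the cluster ID file is written through DFSStripedOutputStream under the RS-3-2-1024k erasure coding policy, but this mini-cluster only has three datanodes, so the two parity blocks can never be placed and each such write is followed by a "Block group <1> failed to write 2 blocks" warning. The log's own suggestion is 'hdfs ec -verifyClusterSetup'. As a purely illustrative sketch (not part of this test run), the Java below shows how one might inspect and, where a policy was explicitly set on the directory, clear the erasure coding policy via the public DistributedFileSystem API; the NameNode URI comes from the log above, while the directory path is a hypothetical stand-in for the test-data path.

```java
// Illustrative sketch only. It assumes the NameNode URI seen in the log
// (hdfs://localhost:34993) and a hypothetical test-data directory; adjust both.
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    URI nameNode = URI.create("hdfs://localhost:34993");   // from the log above
    Path testData = new Path("/user/jenkins/test-data");   // hypothetical path

    try (FileSystem fs = FileSystem.get(nameNode, conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;

      // Null means the directory uses plain replication rather than striping.
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(testData);
      System.out.println("EC policy on " + testData + ": " + policy);

      // An RS-3-2 layout needs five datanodes (3 data + 2 parity) to place a
      // full block group; on a three-node mini-cluster the parity blocks fail.
      // Unsetting only applies where the policy was set on this directory itself.
      if (policy != null && "RS-3-2-1024k".equals(policy.getName())) {
        dfs.unsetErasureCodingPolicy(testData);
      }
    }
  }
}
```

The command-line equivalent of this check is the 'hdfs ec -verifyClusterSetup' call that the warning itself recommends.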
2024-11-09T18:51:51,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-09T18:51:51,658 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T18:51:51,659 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/.tmp/hbase.id]:[hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/hbase.id] 2024-11-09T18:51:51,704 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:51:51,709 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-09T18:51:51,727 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-09T18:51:51,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:51,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:51,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:51,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:51,934 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:51,934 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:51,940 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:42780 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:43411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42780 dst: /127.0.0.1:43411 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:51,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-09T18:51:51,949 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T18:51:51,966 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T18:51:51,969 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-09T18:51:51,975 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T18:51:52,003 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:52,004 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:52,011 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:42790 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:43411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42790 dst: /127.0.0.1:43411 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:52,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-09T18:51:52,017 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T18:51:52,033 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store 2024-11-09T18:51:52,048 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:52,048 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:52,051 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:46478 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46478 dst: /127.0.0.1:38899 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:52,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-09T18:51:52,057 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T18:51:52,061 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-09T18:51:52,065 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:51:52,067 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T18:51:52,067 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:51:52,067 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:51:52,069 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T18:51:52,070 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:51:52,070 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
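[Editor's note] The StoreHotnessProtector lines above note that the protector is off because hbase.region.store.parallel.put.limit is 0, which is why HRegion reports parallelPutToStoreThreadLimit=0 and "hotProtect now disable" when instantiating master:store. As a minimal sketch using only the property name reported in the log (the numeric value is an illustrative assumption, not a recommendation), this is how the limit could be supplied through an HBase Configuration so the protector would come up enabled:

```java
// Minimal sketch, assuming only the property name reported in the log above.
// The value 10 is an arbitrary illustrative limit, not a tuned recommendation.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HotnessProtectorConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // 0 (the default seen above) disables StoreHotnessProtector entirely;
    // any value > 0 caps concurrent puts per store and turns it on.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);

    System.out.println("parallel put limit = "
        + conf.getInt("hbase.region.store.parallel.put.limit", 0));
  }
}
```

In a real deployment this setting would more typically live in hbase-site.xml on the region servers than be set programmatically.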
2024-11-09T18:51:52,071 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731178312067Disabling compacts and flushes for region at 1731178312067Disabling writes for close at 1731178312070 (+3 ms)Writing region close event to WAL at 1731178312070Closed at 1731178312070 2024-11-09T18:51:52,073 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/.initializing 2024-11-09T18:51:52,073 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/WALs/fb97eb0edbe8,45547,1731178310487 2024-11-09T18:51:52,083 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T18:51:52,098 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fb97eb0edbe8%2C45547%2C1731178310487, suffix=, logDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/WALs/fb97eb0edbe8,45547,1731178310487, archiveDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/oldWALs, maxLogs=10 2024-11-09T18:51:52,129 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/WALs/fb97eb0edbe8,45547,1731178310487/fb97eb0edbe8%2C45547%2C1731178310487.1731178312103, exclude list is [], retry=0 2024-11-09T18:51:52,148 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:52,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38899,DS-7cda7003-85ec-4c0d-a642-3ac5b176cdc8,DISK] 2024-11-09T18:51:52,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43411,DS-70bdd7cc-485a-4b78-99e9-62cd5a1cbbf6,DISK] 2024-11-09T18:51:52,149 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44911,DS-10d40756-7847-476c-8225-68f25a818c2c,DISK] 2024-11-09T18:51:52,153 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-09T18:51:52,192 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/WALs/fb97eb0edbe8,45547,1731178310487/fb97eb0edbe8%2C45547%2C1731178310487.1731178312103 2024-11-09T18:51:52,193 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44077:44077),(127.0.0.1/127.0.0.1:41165:41165),(127.0.0.1/127.0.0.1:36463:36463)] 2024-11-09T18:51:52,193 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-09T18:51:52,193 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:51:52,196 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,197 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,234 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-09T18:51:52,264 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:52,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:51:52,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,272 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-09T18:51:52,273 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:52,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T18:51:52,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,277 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-09T18:51:52,278 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:52,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T18:51:52,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-09T18:51:52,283 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:52,284 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T18:51:52,285 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,288 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,289 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,295 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,295 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,299 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T18:51:52,303 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:51:52,310 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T18:51:52,311 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61679244, jitterRate=-0.08090764284133911}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T18:51:52,322 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731178312209Initializing all the Stores at 1731178312211 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178312212 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178312212Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178312212Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178312212Cleaning up temporary data from old regions at 1731178312295 (+83 ms)Region opened successfully at 1731178312322 (+27 ms) 2024-11-09T18:51:52,324 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-09T18:51:52,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-09T18:51:52,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-09T18:51:52,361 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@77246d7f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fb97eb0edbe8/172.17.0.3:0 2024-11-09T18:51:52,393 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-09T18:51:52,404 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-09T18:51:52,404 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-09T18:51:52,407 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-09T18:51:52,408 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-09T18:51:52,414 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-09T18:51:52,414 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-09T18:51:52,444 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
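[Editor's note] A short aside on the flush-policy arithmetic recorded above, using only values already present in this log: the master:store region has four column families (info, proc, rs, state), and since no hbase.hregion.percolumnfamilyflush.size.lower.bound is set, FlushLargeStoresPolicy falls back to the region's memstore flush size divided by the number of families. With the 134217728-byte (128 MB) flush size reported earlier, that is 134217728 / 4 = 33554432 bytes (32 MB), matching both the "(32.0 M)" in the policy message and the FlushLargeStoresPolicy{flushSizeLowerBound=33554432} reported when the region opens.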
2024-11-09T18:51:52,453 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-09T18:51:53,233 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-09T18:51:53,236 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-09T18:51:53,238 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-09T18:51:53,327 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-09T18:51:53,330 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-09T18:51:53,334 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-09T18:51:53,343 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-09T18:51:53,345 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-09T18:51:53,353 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-09T18:51:53,375 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-09T18:51:53,385 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-09T18:51:53,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T18:51:53,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T18:51:53,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:53,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-09T18:51:53,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T18:51:53,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:53,396 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T18:51:53,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:53,400 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=fb97eb0edbe8,45547,1731178310487, sessionid=0x10120f0a3f90000, setting cluster-up flag (Was=false) 2024-11-09T18:51:53,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:53,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:53,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:53,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:53,648 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-09T18:51:53,651 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fb97eb0edbe8,45547,1731178310487 2024-11-09T18:51:53,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:53,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:53,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:53,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:53,706 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-09T18:51:53,708 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fb97eb0edbe8,45547,1731178310487 2024-11-09T18:51:53,714 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-09T18:51:53,781 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-09T18:51:53,790 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-09T18:51:53,798 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
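For reference on the "Unable to get data of znode ... (not necessarily an error)" probes earlier in this log: a minimal sketch of the same read-if-present pattern, written against the plain Apache ZooKeeper client rather than HBase's internal ZKUtil. The quorum address and znode path are copied from the log lines above; the session timeout and the rest of the scaffolding are illustrative assumptions.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class ZNodeProbe {
    public static void main(String[] args) throws Exception {
        // Quorum taken from the log above; the 15s session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51638", 15000, (WatchedEvent e) -> { });
        try {
            // Read the optional balancer switch znode if it exists.
            byte[] data = zk.getData("/hbase/balancer", false, null);
            System.out.println("/hbase/balancer present, " + data.length + " bytes");
        } catch (KeeperException.NoNodeException e) {
            // Mirrors the DEBUG message: a missing switch znode is an expected
            // outcome, and the caller simply falls back to its default state.
            System.out.println("/hbase/balancer missing; using the default switch state");
        } finally {
            zk.close();
        }
    }
}

Logging the absence at DEBUG rather than WARN is the point of the pattern: on a fresh cluster the switch znodes simply do not exist yet.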
2024-11-09T18:51:53,798 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer(746): ClusterId : 9dc5d6e1-e3ed-470f-b63b-31a779d1cc63 2024-11-09T18:51:53,799 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(746): ClusterId : 9dc5d6e1-e3ed-470f-b63b-31a779d1cc63 2024-11-09T18:51:53,799 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(746): ClusterId : 9dc5d6e1-e3ed-470f-b63b-31a779d1cc63 2024-11-09T18:51:53,801 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T18:51:53,801 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T18:51:53,801 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T18:51:53,805 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: fb97eb0edbe8,45547,1731178310487 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-09T18:51:53,824 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T18:51:53,824 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T18:51:53,824 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T18:51:53,824 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T18:51:53,825 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T18:51:53,825 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T18:51:53,825 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/fb97eb0edbe8:0, corePoolSize=5, maxPoolSize=5 2024-11-09T18:51:53,825 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/fb97eb0edbe8:0, corePoolSize=5, maxPoolSize=5 2024-11-09T18:51:53,826 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/fb97eb0edbe8:0, corePoolSize=5, maxPoolSize=5 2024-11-09T18:51:53,826 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/fb97eb0edbe8:0, corePoolSize=5, maxPoolSize=5 2024-11-09T18:51:53,826 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/fb97eb0edbe8:0, corePoolSize=10, maxPoolSize=10 2024-11-09T18:51:53,826 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:53,826 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] 
executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/fb97eb0edbe8:0, corePoolSize=2, maxPoolSize=2 2024-11-09T18:51:53,826 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:53,833 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731178343833 2024-11-09T18:51:53,835 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-09T18:51:53,836 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T18:51:53,837 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-09T18:51:53,837 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-09T18:51:53,842 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-09T18:51:53,842 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-09T18:51:53,843 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-09T18:51:53,843 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-09T18:51:53,844 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:53,845 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-09T18:51:53,844 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:53,849 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-09T18:51:53,850 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T18:51:53,850 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T18:51:53,850 DEBUG [RS:1;fb97eb0edbe8:41163 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ca55d7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fb97eb0edbe8/172.17.0.3:0 2024-11-09T18:51:53,850 DEBUG [RS:2;fb97eb0edbe8:44469 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9daeba7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fb97eb0edbe8/172.17.0.3:0 2024-11-09T18:51:53,851 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T18:51:53,851 DEBUG [RS:0;fb97eb0edbe8:43101 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c01e542, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fb97eb0edbe8/172.17.0.3:0 2024-11-09T18:51:53,852 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-09T18:51:53,853 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-09T18:51:53,859 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-09T18:51:53,859 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-09T18:51:53,862 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.large.0-1731178313861,5,FailOnTimeoutGroup] 2024-11-09T18:51:53,863 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:53,863 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. 
There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:53,866 DEBUG [RS:0;fb97eb0edbe8:43101 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;fb97eb0edbe8:43101 2024-11-09T18:51:53,869 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T18:51:53,869 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T18:51:53,869 DEBUG [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T18:51:53,870 DEBUG [RS:1;fb97eb0edbe8:41163 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;fb97eb0edbe8:41163 2024-11-09T18:51:53,870 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T18:51:53,870 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T18:51:53,870 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.small.0-1731178313862,5,FailOnTimeoutGroup] 2024-11-09T18:51:53,870 DEBUG [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T18:51:53,870 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:53,870 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-09T18:51:53,872 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:53,872 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:53,873 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer(2659): reportForDuty to master=fb97eb0edbe8,45547,1731178310487 with port=43101, startcode=1731178311267 2024-11-09T18:51:53,873 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(2659): reportForDuty to master=fb97eb0edbe8,45547,1731178310487 with port=41163, startcode=1731178311407 2024-11-09T18:51:53,873 DEBUG [RS:2;fb97eb0edbe8:44469 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;fb97eb0edbe8:44469 2024-11-09T18:51:53,873 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T18:51:53,873 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T18:51:53,873 DEBUG [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(832): About to register with Master. 
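The HMaster entry above notes that reopening regions with very high store-file reference counts stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. A small, hedged sketch of supplying such a threshold programmatically; the key name is quoted from the log, the value 3 is purely illustrative, and in a real deployment the setting would normally live in hbase-site.xml instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RefCountThreshold {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Key name as quoted in the master log; 3 is an illustrative threshold, not a recommendation.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
        System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
    }
}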
2024-11-09T18:51:53,873 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:46514 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:38899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46514 dst: /127.0.0.1:38899
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-09T18:51:53,875 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(2659): reportForDuty to master=fb97eb0edbe8,45547,1731178310487 with port=44469, startcode=1731178311459
2024-11-09T18:51:53,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_-9223372036854775712_1013 (size=1321)
2024-11-09T18:51:53,884 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-09T18:51:53,886 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-09T18:51:53,886 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b 2024-11-09T18:51:53,888 DEBUG [RS:0;fb97eb0edbe8:43101 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T18:51:53,888 DEBUG [RS:1;fb97eb0edbe8:41163 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T18:51:53,888 DEBUG [RS:2;fb97eb0edbe8:44469 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T18:51:53,894 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:53,894 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
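The FSTableDescriptors entry above prints the hbase:meta schema it just wrote: families info, ns, rep_barrier and table, each with ROW_INDEX_V1 block encoding, ROWCOL bloom filters, in-memory caching and (except rep_barrier) 8 KB blocks. As a rough client-side analogue, not the code path InitMetaProcedure itself uses, an equivalent column family can be assembled with the public builder API; the table name below is hypothetical.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptor {
    public static void main(String[] args) {
        // Mirrors the 'info' family attributes printed in the log: ROW_INDEX_V1 encoding,
        // ROWCOL bloom filter, in-memory, 3 versions, 8 KB block size.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setMaxVersions(3)
            .setBlocksize(8192)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_meta_like")) // hypothetical table name
            .setColumnFamily(info)
            .build();
        System.out.println(td);
    }
}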
2024-11-09T18:51:53,897 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:46536 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:38899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46536 dst: /127.0.0.1:38899
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-09T18:51:53,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_-9223372036854775696_1015 (size=32)
2024-11-09T18:51:53,913 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
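The "Cannot allocate parity block (policy=RS-3-2-1024k)" warnings and the DataXceiver "Premature EOF" errors above are consistent with an erasure-coded write on a test cluster that has fewer datanodes than the RS-3-2 layout wants: three data plus two parity blocks, ideally on five distinct nodes, while only 127.0.0.1:38899 shows up in this excerpt, hence "Block group <1> failed to write 2 blocks". The log itself points at 'hdfs ec -verifyClusterSetup' for diagnosis; as a hedged sketch, the policy in effect on a directory can also be inspected through the DistributedFileSystem API (NameNode address taken from the log, directory path illustrative).

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyProbe {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode address as logged; the directory below is an illustrative choice.
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34993"), conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data"));
            System.out.println(policy == null
                ? "directory uses plain replication"
                : "erasure coding policy in effect: " + policy.getName());
        }
    }
}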
2024-11-09T18:51:53,914 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:51:53,920 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T18:51:53,924 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T18:51:53,924 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:53,925 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:51:53,926 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T18:51:53,929 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T18:51:53,929 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:53,931 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:51:53,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T18:51:53,935 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:53369, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T18:51:53,935 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44955, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T18:51:53,935 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:33867, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T18:51:53,935 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T18:51:53,935 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:53,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:51:53,937 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T18:51:53,941 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45547 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fb97eb0edbe8,41163,1731178311407 2024-11-09T18:51:53,943 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45547 {}] master.ServerManager(517): Registering regionserver=fb97eb0edbe8,41163,1731178311407 2024-11-09T18:51:53,948 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 
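The CompactionConfiguration entries above spell out the selection knobs applied to each family of region 1588230740: minCompactSize 128 MB, between 3 and 10 files per compaction, ratio 1.2 (5.0 off-peak) and a throttle point of 2684354560 bytes (2.5 GB). A deliberately simplified sketch of the core ratio rule those numbers feed: a candidate file may join a selection only if it is no larger than ratio times the combined size of the other candidates. The real ExploringCompactionPolicy evaluates this per file over many candidate windows; this is only the inequality itself.

import java.util.List;

public class RatioCheck {
    // Simplified form of the logged ratio test: candidate <= ratio * sum(others).
    static boolean withinRatio(long candidate, List<Long> others, double ratio) {
        long sum = others.stream().mapToLong(Long::longValue).sum();
        return candidate <= ratio * sum;
    }

    public static void main(String[] args) {
        // With ratio 1.2 as in the log: a 100 MB file next to 40 MB + 50 MB peers qualifies,
        // next to 40 MB + 30 MB it does not.
        System.out.println(withinRatio(100, List.of(40L, 50L), 1.2)); // true  (100 <= 108)
        System.out.println(withinRatio(100, List.of(40L, 30L), 1.2)); // false (100 > 84)
    }
}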
2024-11-09T18:51:53,948 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:53,950 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:51:53,950 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T18:51:53,952 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740 2024-11-09T18:51:53,953 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740 2024-11-09T18:51:53,954 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45547 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:53,954 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45547 {}] master.ServerManager(517): Registering regionserver=fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:53,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T18:51:53,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T18:51:53,958 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45547 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fb97eb0edbe8,43101,1731178311267 2024-11-09T18:51:53,959 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45547 {}] master.ServerManager(517): Registering regionserver=fb97eb0edbe8,43101,1731178311267 2024-11-09T18:51:53,960 DEBUG [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b 2024-11-09T18:51:53,960 DEBUG [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b 2024-11-09T18:51:53,960 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
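The FlushLargeStoresPolicy entry above falls back to region.getMemStoreFlushHeapSize divided by the number of families and reports 32.0 M. Assuming the default 128 MB memstore flush size, that is simply 134217728 bytes split across the four hbase:meta families (info, ns, rep_barrier, table), and it matches the flushSizeLowerBound=33554432 printed a few entries later.

public class FlushLowerBound {
    public static void main(String[] args) {
        long memstoreFlushSize = 134_217_728L; // assumed default hbase.hregion.memstore.flush.size (128 MB)
        int families = 4;                      // hbase:meta has info, ns, rep_barrier and table
        System.out.println(memstoreFlushSize / families); // 33554432 bytes = 32.0 M, as logged
    }
}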
2024-11-09T18:51:53,960 DEBUG [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34993 2024-11-09T18:51:53,960 DEBUG [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34993 2024-11-09T18:51:53,960 DEBUG [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T18:51:53,960 DEBUG [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T18:51:53,964 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T18:51:53,964 DEBUG [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b 2024-11-09T18:51:53,964 DEBUG [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34993 2024-11-09T18:51:53,964 DEBUG [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T18:51:53,971 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T18:51:53,972 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74486532, jitterRate=0.10993582010269165}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T18:51:53,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T18:51:53,978 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731178313915Initializing all the Stores at 1731178313917 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178313918 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178313920 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178313920Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 
'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178313920Cleaning up temporary data from old regions at 1731178313958 (+38 ms)Region opened successfully at 1731178313978 (+20 ms) 2024-11-09T18:51:53,978 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T18:51:53,978 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T18:51:53,978 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T18:51:53,979 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T18:51:53,979 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T18:51:53,980 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T18:51:53,981 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731178313978Disabling compacts and flushes for region at 1731178313978Disabling writes for close at 1731178313979 (+1 ms)Writing region close event to WAL at 1731178313980 (+1 ms)Closed at 1731178313980 2024-11-09T18:51:53,984 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T18:51:53,984 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-09T18:51:53,993 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-09T18:51:54,002 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T18:51:54,006 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-09T18:51:54,015 DEBUG [RS:2;fb97eb0edbe8:44469 {}] zookeeper.ZKUtil(111): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:54,015 WARN [RS:2;fb97eb0edbe8:44469 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
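The region-open entry above prints SteppingSplitPolicy with initialSize=268435456 and an inner ConstantSizeRegionSplitPolicy showing desiredMaxFileSize=74486532 and jitterRate=0.10993582010269165. Working backwards, and assuming the desired size is derived as base plus base times jitterRate, the base comes out to 67108864 bytes (64 MB), which would be the max region file size configured for this test, while initialSize is 2 x the 128 MB flush size. A one-line check of that arithmetic:

public class SplitSizeJitter {
    public static void main(String[] args) {
        double jitterRate = 0.10993582010269165; // printed in the log
        long desiredMaxFileSize = 74_486_532L;   // printed in the log
        // Assumed relation: desired = base * (1 + jitterRate); solve for the base.
        System.out.println(Math.round(desiredMaxFileSize / (1.0 + jitterRate))); // 67108864 = 64 MB
    }
}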
2024-11-09T18:51:54,015 INFO [RS:2;fb97eb0edbe8:44469 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T18:51:54,015 DEBUG [RS:0;fb97eb0edbe8:43101 {}] zookeeper.ZKUtil(111): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fb97eb0edbe8,43101,1731178311267 2024-11-09T18:51:54,015 DEBUG [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:54,015 DEBUG [RS:1;fb97eb0edbe8:41163 {}] zookeeper.ZKUtil(111): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fb97eb0edbe8,41163,1731178311407 2024-11-09T18:51:54,015 WARN [RS:0;fb97eb0edbe8:43101 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T18:51:54,015 WARN [RS:1;fb97eb0edbe8:41163 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T18:51:54,015 INFO [RS:0;fb97eb0edbe8:43101 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T18:51:54,015 INFO [RS:1;fb97eb0edbe8:41163 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T18:51:54,016 DEBUG [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,41163,1731178311407 2024-11-09T18:51:54,016 DEBUG [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,43101,1731178311267 2024-11-09T18:51:54,017 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fb97eb0edbe8,41163,1731178311407] 2024-11-09T18:51:54,017 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fb97eb0edbe8,43101,1731178311267] 2024-11-09T18:51:54,017 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fb97eb0edbe8,44469,1731178311459] 2024-11-09T18:51:54,041 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T18:51:54,041 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T18:51:54,041 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T18:51:54,057 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T18:51:54,057 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T18:51:54,057 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T18:51:54,062 INFO 
[RS:2;fb97eb0edbe8:44469 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T18:51:54,062 INFO [RS:1;fb97eb0edbe8:41163 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T18:51:54,062 INFO [RS:0;fb97eb0edbe8:43101 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T18:51:54,062 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,062 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,062 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,063 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T18:51:54,063 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T18:51:54,063 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T18:51:54,070 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T18:51:54,070 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T18:51:54,070 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T18:51:54,072 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,072 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,072 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
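The MemStoreFlusher entries above report globalMemStoreLimit=880 M with a low-water mark of 836 M. Under the assumed defaults, where the global limit is 0.4 of the heap and the low mark is 0.95 of the global limit, the two figures are self-consistent and imply a max heap of roughly 2.2 GB for these regionserver JVMs.

public class MemStoreLimits {
    public static void main(String[] args) {
        long globalLimitMb = 880;                      // printed by MemStoreFlusher above
        System.out.println(globalLimitMb * 95 / 100);  // 836  -> matches globalMemStoreLimitLowMark
        System.out.println(globalLimitMb * 10 / 4);    // 2200 -> implied max heap in MB at the assumed 0.4 default
    }
}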
2024-11-09T18:51:54,073 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=2, maxPoolSize=2 2024-11-09T18:51:54,073 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=2, maxPoolSize=2 2024-11-09T18:51:54,073 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=2, maxPoolSize=2 2024-11-09T18:51:54,073 DEBUG 
[RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,073 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting 
executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:51:54,074 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:51:54,074 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:51:54,074 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:51:54,074 DEBUG [RS:1;fb97eb0edbe8:41163 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:51:54,074 DEBUG [RS:0;fb97eb0edbe8:43101 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:51:54,074 DEBUG [RS:2;fb97eb0edbe8:44469 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:51:54,076 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,076 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,076 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,077 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,077 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,077 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,41163,1731178311407-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T18:51:54,082 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,082 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,082 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,082 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,082 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,082 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 
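The executor-service and chore entries above describe fixed-size pools (for example RS_OPEN_REGION with corePoolSize=1, maxPoolSize=1) plus periodic chores such as CompactionChecker every 1000 ms. As a loose JDK-level analogy, not HBase's own ExecutorService or ChoreService classes, the same shape can be expressed with java.util.concurrent:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PoolsAndChores {
    public static void main(String[] args) throws InterruptedException {
        // Analogue of "RS_OPEN_REGION ... corePoolSize=1, maxPoolSize=1": a single-threaded
        // pool that serializes region-open work.
        ExecutorService openRegionPool = Executors.newFixedThreadPool(1);
        openRegionPool.submit(() -> System.out.println("open region task"));

        // Analogue of a chore such as CompactionChecker (period=1000 ms in the log):
        // a task run at a fixed rate.
        ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();
        chores.scheduleAtFixedRate(() -> System.out.println("compaction check"),
                0, 1000, TimeUnit.MILLISECONDS);

        Thread.sleep(2500);        // let a couple of periods elapse, then shut everything down
        chores.shutdownNow();
        openRegionPool.shutdown();
    }
}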
2024-11-09T18:51:54,082 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,082 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,082 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,082 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,082 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,43101,1731178311267-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T18:51:54,082 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,44469,1731178311459-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T18:51:54,103 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T18:51:54,105 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,44469,1731178311459-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,105 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T18:51:54,105 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,106 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.Replication(171): fb97eb0edbe8,44469,1731178311459 started 2024-11-09T18:51:54,106 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,41163,1731178311407-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,106 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,106 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.Replication(171): fb97eb0edbe8,41163,1731178311407 started 2024-11-09T18:51:54,106 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T18:51:54,107 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,43101,1731178311267-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,107 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,107 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.Replication(171): fb97eb0edbe8,43101,1731178311267 started 2024-11-09T18:51:54,132 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,132 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T18:51:54,132 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:54,133 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(1482): Serving as fb97eb0edbe8,44469,1731178311459, RpcServer on fb97eb0edbe8/172.17.0.3:44469, sessionid=0x10120f0a3f90003 2024-11-09T18:51:54,133 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer(1482): Serving as fb97eb0edbe8,43101,1731178311267, RpcServer on fb97eb0edbe8/172.17.0.3:43101, sessionid=0x10120f0a3f90001 2024-11-09T18:51:54,133 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(1482): Serving as fb97eb0edbe8,41163,1731178311407, RpcServer on fb97eb0edbe8/172.17.0.3:41163, sessionid=0x10120f0a3f90002 2024-11-09T18:51:54,134 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T18:51:54,134 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T18:51:54,134 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T18:51:54,134 DEBUG [RS:1;fb97eb0edbe8:41163 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fb97eb0edbe8,41163,1731178311407 2024-11-09T18:51:54,134 DEBUG [RS:2;fb97eb0edbe8:44469 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:54,134 DEBUG [RS:0;fb97eb0edbe8:43101 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fb97eb0edbe8,43101,1731178311267 2024-11-09T18:51:54,134 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,41163,1731178311407' 2024-11-09T18:51:54,134 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,44469,1731178311459' 2024-11-09T18:51:54,134 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,43101,1731178311267' 2024-11-09T18:51:54,134 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T18:51:54,134 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T18:51:54,134 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T18:51:54,135 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T18:51:54,135 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T18:51:54,135 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T18:51:54,136 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T18:51:54,136 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.RegionServerProcedureManagerHost(53): Procedure 
flush-table-proc started 2024-11-09T18:51:54,136 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T18:51:54,136 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T18:51:54,136 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T18:51:54,136 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T18:51:54,136 DEBUG [RS:0;fb97eb0edbe8:43101 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fb97eb0edbe8,43101,1731178311267 2024-11-09T18:51:54,136 DEBUG [RS:2;fb97eb0edbe8:44469 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:54,136 DEBUG [RS:1;fb97eb0edbe8:41163 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fb97eb0edbe8,41163,1731178311407 2024-11-09T18:51:54,136 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,44469,1731178311459' 2024-11-09T18:51:54,136 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,41163,1731178311407' 2024-11-09T18:51:54,136 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,43101,1731178311267' 2024-11-09T18:51:54,136 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T18:51:54,136 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T18:51:54,136 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T18:51:54,137 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T18:51:54,137 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T18:51:54,137 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T18:51:54,138 DEBUG [RS:2;fb97eb0edbe8:44469 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T18:51:54,138 DEBUG [RS:1;fb97eb0edbe8:41163 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T18:51:54,138 DEBUG [RS:0;fb97eb0edbe8:43101 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T18:51:54,138 INFO [RS:2;fb97eb0edbe8:44469 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T18:51:54,138 INFO [RS:0;fb97eb0edbe8:43101 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T18:51:54,138 INFO [RS:1;fb97eb0edbe8:41163 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T18:51:54,138 INFO [RS:2;fb97eb0edbe8:44469 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-09T18:51:54,138 INFO [RS:0;fb97eb0edbe8:43101 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T18:51:54,138 INFO [RS:1;fb97eb0edbe8:41163 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T18:51:54,157 WARN [fb97eb0edbe8:45547 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-09T18:51:54,243 INFO [RS:1;fb97eb0edbe8:41163 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T18:51:54,243 INFO [RS:2;fb97eb0edbe8:44469 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T18:51:54,243 INFO [RS:0;fb97eb0edbe8:43101 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-09T18:51:54,246 INFO [RS:0;fb97eb0edbe8:43101 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fb97eb0edbe8%2C43101%2C1731178311267, suffix=, logDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,43101,1731178311267, archiveDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/oldWALs, maxLogs=32 2024-11-09T18:51:54,246 INFO [RS:2;fb97eb0edbe8:44469 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fb97eb0edbe8%2C44469%2C1731178311459, suffix=, logDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,44469,1731178311459, archiveDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/oldWALs, maxLogs=32 2024-11-09T18:51:54,246 INFO [RS:1;fb97eb0edbe8:41163 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fb97eb0edbe8%2C41163%2C1731178311407, suffix=, logDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,41163,1731178311407, archiveDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/oldWALs, maxLogs=32 2024-11-09T18:51:54,266 DEBUG [RS:2;fb97eb0edbe8:44469 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,44469,1731178311459/fb97eb0edbe8%2C44469%2C1731178311459.1731178314250, exclude list is [], retry=0 2024-11-09T18:51:54,267 DEBUG [RS:1;fb97eb0edbe8:41163 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,41163,1731178311407/fb97eb0edbe8%2C41163%2C1731178311407.1731178314251, exclude list is [], retry=0 2024-11-09T18:51:54,267 DEBUG [RS:0;fb97eb0edbe8:43101 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,43101,1731178311267/fb97eb0edbe8%2C43101%2C1731178311267.1731178314251, exclude list is [], retry=0 2024-11-09T18:51:54,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38899,DS-7cda7003-85ec-4c0d-a642-3ac5b176cdc8,DISK] 2024-11-09T18:51:54,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38899,DS-7cda7003-85ec-4c0d-a642-3ac5b176cdc8,DISK] 2024-11-09T18:51:54,272 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43411,DS-70bdd7cc-485a-4b78-99e9-62cd5a1cbbf6,DISK] 2024-11-09T18:51:54,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43411,DS-70bdd7cc-485a-4b78-99e9-62cd5a1cbbf6,DISK] 2024-11-09T18:51:54,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44911,DS-10d40756-7847-476c-8225-68f25a818c2c,DISK] 2024-11-09T18:51:54,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44911,DS-10d40756-7847-476c-8225-68f25a818c2c,DISK] 2024-11-09T18:51:54,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38899,DS-7cda7003-85ec-4c0d-a642-3ac5b176cdc8,DISK] 2024-11-09T18:51:54,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43411,DS-70bdd7cc-485a-4b78-99e9-62cd5a1cbbf6,DISK] 2024-11-09T18:51:54,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44911,DS-10d40756-7847-476c-8225-68f25a818c2c,DISK] 2024-11-09T18:51:54,307 INFO [RS:1;fb97eb0edbe8:41163 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,41163,1731178311407/fb97eb0edbe8%2C41163%2C1731178311407.1731178314251 2024-11-09T18:51:54,308 INFO [RS:0;fb97eb0edbe8:43101 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,43101,1731178311267/fb97eb0edbe8%2C43101%2C1731178311267.1731178314251 2024-11-09T18:51:54,309 DEBUG [RS:1;fb97eb0edbe8:41163 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44077:44077),(127.0.0.1/127.0.0.1:41165:41165),(127.0.0.1/127.0.0.1:36463:36463)] 2024-11-09T18:51:54,310 INFO [RS:2;fb97eb0edbe8:44469 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,44469,1731178311459/fb97eb0edbe8%2C44469%2C1731178311459.1731178314250 2024-11-09T18:51:54,310 DEBUG [RS:0;fb97eb0edbe8:43101 {}] wal.AbstractFSWAL(1109): 
Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41165:41165),(127.0.0.1/127.0.0.1:36463:36463),(127.0.0.1/127.0.0.1:44077:44077)] 2024-11-09T18:51:54,312 DEBUG [RS:2;fb97eb0edbe8:44469 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36463:36463),(127.0.0.1/127.0.0.1:41165:41165),(127.0.0.1/127.0.0.1:44077:44077)] 2024-11-09T18:51:54,409 DEBUG [fb97eb0edbe8:45547 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-09T18:51:54,420 DEBUG [fb97eb0edbe8:45547 {}] balancer.BalancerClusterState(204): Hosts are {fb97eb0edbe8=0} racks are {/default-rack=0} 2024-11-09T18:51:54,448 DEBUG [fb97eb0edbe8:45547 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T18:51:54,448 DEBUG [fb97eb0edbe8:45547 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T18:51:54,448 DEBUG [fb97eb0edbe8:45547 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T18:51:54,448 DEBUG [fb97eb0edbe8:45547 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T18:51:54,448 DEBUG [fb97eb0edbe8:45547 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T18:51:54,448 DEBUG [fb97eb0edbe8:45547 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T18:51:54,448 INFO [fb97eb0edbe8:45547 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T18:51:54,448 INFO [fb97eb0edbe8:45547 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T18:51:54,448 INFO [fb97eb0edbe8:45547 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T18:51:54,449 DEBUG [fb97eb0edbe8:45547 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T18:51:54,456 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:54,469 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fb97eb0edbe8,44469,1731178311459, state=OPENING 2024-11-09T18:51:54,495 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-09T18:51:54,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:54,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:54,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:54,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:54,507 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:51:54,507 DEBUG 
[zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:51:54,507 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:51:54,507 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:51:54,509 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T18:51:54,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=fb97eb0edbe8,44469,1731178311459}] 2024-11-09T18:51:54,685 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T18:51:54,687 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42247, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T18:51:54,701 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-09T18:51:54,701 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-09T18:51:54,702 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-09T18:51:54,706 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fb97eb0edbe8%2C44469%2C1731178311459.meta, suffix=.meta, logDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,44469,1731178311459, archiveDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/oldWALs, maxLogs=32 2024-11-09T18:51:54,728 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,44469,1731178311459/fb97eb0edbe8%2C44469%2C1731178311459.meta.1731178314709.meta, exclude list is [], retry=0 2024-11-09T18:51:54,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43411,DS-70bdd7cc-485a-4b78-99e9-62cd5a1cbbf6,DISK] 2024-11-09T18:51:54,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:38899,DS-7cda7003-85ec-4c0d-a642-3ac5b176cdc8,DISK] 2024-11-09T18:51:54,734 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44911,DS-10d40756-7847-476c-8225-68f25a818c2c,DISK] 2024-11-09T18:51:54,738 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,44469,1731178311459/fb97eb0edbe8%2C44469%2C1731178311459.meta.1731178314709.meta 2024-11-09T18:51:54,739 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:41165:41165),(127.0.0.1/127.0.0.1:44077:44077),(127.0.0.1/127.0.0.1:36463:36463)] 2024-11-09T18:51:54,739 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-09T18:51:54,741 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-09T18:51:54,743 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-09T18:51:54,749 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
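The wal.AbstractFSWAL(613) lines above report the WAL configuration the region servers (and the meta WAL just created) run with: blocksize=256 MB, rollsize=128 MB, maxLogs=32. A minimal sketch of where such values typically come from is below; the property names are the standard HBase keys, but defaults and the exact derivation vary by version, so treat this as illustrative rather than as the actual AbstractFSWAL code path.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; this run logs 256 MB (the fallback value below is only this sketch's assumption).
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll size is commonly blockSize * multiplier (0.5), which matches the logged 128 MB.
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        long rollSize = (long) (blockSize * multiplier);
        // Cap on retained WAL files per region server before flushes are forced; logged as 32.
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
        System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n", blockSize, rollSize, maxLogs);
      }
    }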
2024-11-09T18:51:54,753 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-09T18:51:54,754 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:51:54,754 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-09T18:51:54,754 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-09T18:51:54,758 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T18:51:54,759 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T18:51:54,760 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:54,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:51:54,761 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T18:51:54,762 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T18:51:54,762 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:54,763 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:51:54,764 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T18:51:54,765 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T18:51:54,765 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:54,766 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:51:54,767 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T18:51:54,768 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T18:51:54,768 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:54,769 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
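The repeated compactions.CompactionConfiguration(183) lines above encode the per-family compaction settings for hbase:meta. They map onto the usual configuration keys; the sketch below reads those keys with defaults matching this log (the key names are the standard ones, but verify them against your HBase version).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "files [minFilesToCompact:3, maxFilesToCompact:10)" in the log:
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
        // "ratio 1.200000; off-peak ratio 5.000000":
        double ratio = conf.getDouble("hbase.hstore.compaction.ratio", 1.2);
        double offPeakRatio = conf.getDouble("hbase.hstore.compaction.ratio.offpeak", 5.0);
        // "major period 604800000, major jitter 0.500000" (7 days +/- 50%):
        long majorPeriodMs = conf.getLong("hbase.hregion.majorcompaction", 604800000L);
        float jitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        System.out.printf("min=%d max=%d ratio=%.2f offPeak=%.2f major=%dms jitter=%.2f%n",
            minFiles, maxFiles, ratio, offPeakRatio, majorPeriodMs, jitter);
      }
    }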
2024-11-09T18:51:54,770 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T18:51:54,771 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740 2024-11-09T18:51:54,775 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740 2024-11-09T18:51:54,778 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T18:51:54,778 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T18:51:54,779 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T18:51:54,783 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T18:51:54,786 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66133746, jitterRate=-0.014530390501022339}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T18:51:54,786 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-09T18:51:54,788 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731178314755Writing region info on filesystem at 1731178314755Initializing all the Stores at 1731178314757 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178314757Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178314757Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178314758 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178314758Cleaning up temporary data from old regions at 1731178314778 (+20 ms)Running coprocessor post-open hooks at 1731178314786 (+8 ms)Region opened successfully at 1731178314788 (+2 ms) 2024-11-09T18:51:54,797 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731178314677 2024-11-09T18:51:54,811 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-09T18:51:54,812 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-09T18:51:54,815 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:54,818 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fb97eb0edbe8,44469,1731178311459, state=OPEN 2024-11-09T18:51:54,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T18:51:54,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T18:51:54,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T18:51:54,827 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T18:51:54,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:51:54,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:51:54,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:51:54,827 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:51:54,828 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:54,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-09T18:51:54,835 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=fb97eb0edbe8,44469,1731178311459 in 318 msec 2024-11-09T18:51:54,843 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-09T18:51:54,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 845 msec 2024-11-09T18:51:54,846 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T18:51:54,846 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-09T18:51:54,871 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T18:51:54,873 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fb97eb0edbe8,44469,1731178311459, seqNum=-1] 2024-11-09T18:51:54,899 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T18:51:54,901 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49867, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T18:51:54,927 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1830 sec 2024-11-09T18:51:54,927 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731178314927, completionTime=-1 2024-11-09T18:51:54,931 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-09T18:51:54,931 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-09T18:51:54,996 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-09T18:51:54,996 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731178374996 2024-11-09T18:51:54,996 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731178434996 2024-11-09T18:51:54,996 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 64 msec 2024-11-09T18:51:55,000 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-09T18:51:55,015 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45547,1731178310487-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:55,016 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45547,1731178310487-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:55,016 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45547,1731178310487-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:55,018 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-fb97eb0edbe8:45547, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:55,019 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:55,030 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:55,036 DEBUG [master/fb97eb0edbe8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-09T18:51:55,066 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.490sec 2024-11-09T18:51:55,068 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-09T18:51:55,069 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-09T18:51:55,070 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-09T18:51:55,071 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-09T18:51:55,071 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-09T18:51:55,072 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45547,1731178310487-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T18:51:55,073 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45547,1731178310487-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-09T18:51:55,079 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-09T18:51:55,081 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-09T18:51:55,081 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45547,1731178310487-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:51:55,111 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e88c206, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T18:51:55,116 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-09T18:51:55,116 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-09T18:51:55,119 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request fb97eb0edbe8,45547,-1 for getting cluster id 2024-11-09T18:51:55,122 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-09T18:51:55,131 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9dc5d6e1-e3ed-470f-b63b-31a779d1cc63' 2024-11-09T18:51:55,133 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-09T18:51:55,134 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9dc5d6e1-e3ed-470f-b63b-31a779d1cc63" 2024-11-09T18:51:55,136 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@162f3029, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T18:51:55,136 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fb97eb0edbe8,45547,-1] 2024-11-09T18:51:55,139 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-09T18:51:55,142 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:51:55,143 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43720, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
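The Time-limited test entries above (and the meta-location fetch that follows just below) show the client bootstrap sequence: fetch the cluster id from the connection registry, build stubs against the master, then locate hbase:meta. From application code this sequence sits behind ConnectionFactory; a rough, hedged equivalent (quorum and port taken from the ZKWatcher lines in this log, error handling omitted) is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ClientBootstrapSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // ZooKeeper quorum used by this test run (quorum=127.0.0.1:51638 in the log).
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.setInt("hbase.zookeeper.property.clientPort", 51638);
        try (Connection connection = ConnectionFactory.createConnection(conf);
             RegionLocator meta = connection.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Comparable to the "fetched meta region location" debug line in the log.
          System.out.println(meta.getRegionLocation(new byte[0]));
        }
      }
    }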
2024-11-09T18:51:55,146 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a9bea04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T18:51:55,147 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T18:51:55,155 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fb97eb0edbe8,44469,1731178311459, seqNum=-1] 2024-11-09T18:51:55,155 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T18:51:55,157 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51060, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T18:51:55,181 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=fb97eb0edbe8,45547,1731178310487 2024-11-09T18:51:55,186 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T18:51:55,191 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is fb97eb0edbe8,45547,1731178310487 2024-11-09T18:51:55,194 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@42f1365a 2024-11-09T18:51:55,195 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T18:51:55,197 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43732, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-09T18:51:55,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T18:51:55,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-09T18:51:55,216 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T18:51:55,219 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-09T18:51:55,219 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-09T18:51:55,223 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT
2024-11-09T18:51:55,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-09T18:51:55,241 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-09T18:51:55,241 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-09T18:51:55,248 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:42828 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:43411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42828 dst: /127.0.0.1:43411
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-09T18:51:55,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_-9223372036854775680_1021 (size=392)
2024-11-09T18:51:55,254 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-09T18:51:55,257 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => bc55f1a8ca42900d8121f8f9e47f01d4, NAME => 'TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b
2024-11-09T18:51:55,263 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-09T18:51:55,263 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-09T18:51:55,268 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:56054 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:44911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56054 dst: /127.0.0.1:44911
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-09T18:51:55,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_-9223372036854775664_1023 (size=51)
2024-11-09T18:51:55,277 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
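The parity-block warnings and DataXceiver errors above are a consequence of the test setup rather than a bug: files under the test data directory are written with the RS-3-2-1024k erasure coding policy, which needs 3 data plus 2 parity blocks (so at least 5 datanodes), while this minicluster runs only 3 datanodes (38899, 43411, 44911). The data blocks land (see the addStoredBlock lines), but the two parity blocks cannot be placed, hence "Block group <1> failed to write 2 blocks". A hedged sketch of inspecting or applying the policy through the HDFS client API follows; the path and namenode address are taken from this log, and on a real cluster the 'hdfs ec -verifyClusterSetup' check quoted in the warning should be run first.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class EcPolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:34993"); // namenode used by this test run
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path dir = new Path("/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b");
          // Show which erasure coding policy (if any) currently applies to the directory.
          System.out.println(dfs.getErasureCodingPolicy(dir));
          // RS-3-2-1024k must be enabled cluster-wide and really needs >= 5 datanodes;
          // with only 3 datanodes the parity blocks cannot be placed, as the warnings show.
          dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");
        }
      }
    }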
2024-11-09T18:51:55,277 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:51:55,278 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing bc55f1a8ca42900d8121f8f9e47f01d4, disabling compactions & flushes 2024-11-09T18:51:55,278 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 2024-11-09T18:51:55,278 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 2024-11-09T18:51:55,278 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. after waiting 0 ms 2024-11-09T18:51:55,278 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 2024-11-09T18:51:55,278 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 2024-11-09T18:51:55,278 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for bc55f1a8ca42900d8121f8f9e47f01d4: Waiting for close lock at 1731178315277Disabling compacts and flushes for region at 1731178315277Disabling writes for close at 1731178315278 (+1 ms)Writing region close event to WAL at 1731178315278Closed at 1731178315278 2024-11-09T18:51:55,281 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T18:51:55,287 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731178315281"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731178315281"}]},"ts":"1731178315281"} 2024-11-09T18:51:55,293 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
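For orientation, the create request logged by HMaster$4(2454) earlier (table 'TestHBaseWalOnEC', a single 'cf' family, REGION_REPLICATION => '1') corresponds roughly to the client call sketched below. This is a reconstruction from the log, not the test's actual source code.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          TableDescriptor table = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
              .setRegionReplication(1)                                 // REGION_REPLICATION => '1'
              .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // the single 'cf' family
              .build();
          // Triggers the CreateTableProcedure seen in this log as pid=4.
          admin.createTable(table);
        }
      }
    }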
2024-11-09T18:51:55,296 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T18:51:55,299 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731178315296"}]},"ts":"1731178315296"} 2024-11-09T18:51:55,304 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-09T18:51:55,305 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {fb97eb0edbe8=0} racks are {/default-rack=0} 2024-11-09T18:51:55,307 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T18:51:55,307 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T18:51:55,307 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T18:51:55,307 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T18:51:55,307 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T18:51:55,307 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T18:51:55,307 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T18:51:55,307 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T18:51:55,307 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T18:51:55,307 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T18:51:55,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bc55f1a8ca42900d8121f8f9e47f01d4, ASSIGN}] 2024-11-09T18:51:55,312 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bc55f1a8ca42900d8121f8f9e47f01d4, ASSIGN 2024-11-09T18:51:55,316 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bc55f1a8ca42900d8121f8f9e47f01d4, ASSIGN; state=OFFLINE, location=fb97eb0edbe8,44469,1731178311459; forceNewPlan=false, retain=false 2024-11-09T18:51:55,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-09T18:51:55,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-09T18:51:55,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-09T18:51:55,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T18:51:55,343 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-09T18:51:55,470 INFO [fb97eb0edbe8:45547 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-09T18:51:55,471 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bc55f1a8ca42900d8121f8f9e47f01d4, regionState=OPENING, regionLocation=fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:55,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bc55f1a8ca42900d8121f8f9e47f01d4, ASSIGN because future has completed 2024-11-09T18:51:55,478 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bc55f1a8ca42900d8121f8f9e47f01d4, server=fb97eb0edbe8,44469,1731178311459}] 2024-11-09T18:51:55,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-09T18:51:55,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-09T18:51:55,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-09T18:51:55,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-09T18:51:55,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T18:51:55,644 INFO [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 
2024-11-09T18:51:55,645 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => bc55f1a8ca42900d8121f8f9e47f01d4, NAME => 'TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4.', STARTKEY => '', ENDKEY => ''} 2024-11-09T18:51:55,645 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:55,645 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:51:55,645 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:55,645 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:55,648 INFO [StoreOpener-bc55f1a8ca42900d8121f8f9e47f01d4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:55,651 INFO [StoreOpener-bc55f1a8ca42900d8121f8f9e47f01d4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bc55f1a8ca42900d8121f8f9e47f01d4 columnFamilyName cf 2024-11-09T18:51:55,651 DEBUG [StoreOpener-bc55f1a8ca42900d8121f8f9e47f01d4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:51:55,652 INFO [StoreOpener-bc55f1a8ca42900d8121f8f9e47f01d4-1 {}] regionserver.HStore(327): Store=bc55f1a8ca42900d8121f8f9e47f01d4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T18:51:55,653 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:55,654 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/default/TestHBaseWalOnEC/bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:55,655 DEBUG 
[RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/default/TestHBaseWalOnEC/bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:55,655 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:55,655 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:55,658 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:55,664 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/default/TestHBaseWalOnEC/bc55f1a8ca42900d8121f8f9e47f01d4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T18:51:55,664 INFO [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened bc55f1a8ca42900d8121f8f9e47f01d4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61843913, jitterRate=-0.07845388352870941}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T18:51:55,665 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:55,666 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for bc55f1a8ca42900d8121f8f9e47f01d4: Running coprocessor pre-open hook at 1731178315646Writing region info on filesystem at 1731178315646Initializing all the Stores at 1731178315647 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178315648 (+1 ms)Cleaning up temporary data from old regions at 1731178315656 (+8 ms)Running coprocessor post-open hooks at 1731178315665 (+9 ms)Region opened successfully at 1731178315666 (+1 ms) 2024-11-09T18:51:55,672 INFO [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4., pid=6, masterSystemTime=1731178315634 2024-11-09T18:51:55,675 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 2024-11-09T18:51:55,676 INFO [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 
2024-11-09T18:51:55,681 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=bc55f1a8ca42900d8121f8f9e47f01d4, regionState=OPEN, openSeqNum=2, regionLocation=fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:55,685 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure bc55f1a8ca42900d8121f8f9e47f01d4, server=fb97eb0edbe8,44469,1731178311459 because future has completed 2024-11-09T18:51:55,692 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-09T18:51:55,692 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure bc55f1a8ca42900d8121f8f9e47f01d4, server=fb97eb0edbe8,44469,1731178311459 in 209 msec 2024-11-09T18:51:55,696 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-09T18:51:55,696 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=bc55f1a8ca42900d8121f8f9e47f01d4, ASSIGN in 383 msec 2024-11-09T18:51:55,697 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T18:51:55,698 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731178315697"}]},"ts":"1731178315697"} 2024-11-09T18:51:55,701 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-09T18:51:55,703 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T18:51:55,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 496 msec 2024-11-09T18:51:55,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T18:51:55,863 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-09T18:51:55,863 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T18:51:55,864 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T18:51:55,871 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-09T18:51:55,871 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T18:51:55,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
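The entries from pid=4 through pid=6 above trace a CreateTableProcedure for 'TestHBaseWalOnEC' with a single 'cf' family: ADD_TO_META, ASSIGN_REGIONS, an OpenRegionProcedure on fb97eb0edbe8,44469, and completion in 496 msec, after which the test utility confirms all regions are assigned. A hedged sketch of the client-side call that typically drives such a procedure; the variable names are illustrative and not taken from the test source:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
    // Hedged sketch: create a table with one 'cf' family, the shape of table
    // the CreateTableProcedure entries above describe.
    static void createTestTable(Configuration conf) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.createTable(TableDescriptorBuilder.newBuilder(name)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                    .build());
            // createTable() returns once the master-side procedure (pid=4 above) finishes.
        }
    }
}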
2024-11-09T18:51:55,882 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4., hostname=fb97eb0edbe8,44469,1731178311459, seqNum=2] 2024-11-09T18:51:55,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-09T18:51:55,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-09T18:51:55,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T18:51:55,901 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-09T18:51:55,904 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-09T18:51:55,906 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-09T18:51:56,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T18:51:56,067 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44469 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-09T18:51:56,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 2024-11-09T18:51:56,072 INFO [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing bc55f1a8ca42900d8121f8f9e47f01d4 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-09T18:51:56,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/default/TestHBaseWalOnEC/bc55f1a8ca42900d8121f8f9e47f01d4/.tmp/cf/6f99ccf3f50d4f6095383fe60d76af7a is 36, key is row/cf:cq/1731178315885/Put/seqid=0 2024-11-09T18:51:56,131 WARN [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T18:51:56,131 WARN [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:56,135 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1362927919_22 at /127.0.0.1:42872 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:43411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42872 dst: /127.0.0.1:43411 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:56,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-09T18:51:56,146 WARN [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T18:51:56,147 INFO [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/default/TestHBaseWalOnEC/bc55f1a8ca42900d8121f8f9e47f01d4/.tmp/cf/6f99ccf3f50d4f6095383fe60d76af7a 2024-11-09T18:51:56,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/default/TestHBaseWalOnEC/bc55f1a8ca42900d8121f8f9e47f01d4/.tmp/cf/6f99ccf3f50d4f6095383fe60d76af7a as hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/default/TestHBaseWalOnEC/bc55f1a8ca42900d8121f8f9e47f01d4/cf/6f99ccf3f50d4f6095383fe60d76af7a 2024-11-09T18:51:56,202 INFO [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/default/TestHBaseWalOnEC/bc55f1a8ca42900d8121f8f9e47f01d4/cf/6f99ccf3f50d4f6095383fe60d76af7a, entries=1, sequenceid=5, filesize=4.7 K 2024-11-09T18:51:56,211 INFO [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for bc55f1a8ca42900d8121f8f9e47f01d4 in 136ms, sequenceid=5, compaction requested=false 2024-11-09T18:51:56,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-09T18:51:56,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for bc55f1a8ca42900d8121f8f9e47f01d4: 2024-11-09T18:51:56,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 
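The pid=7/pid=8 entries record a FlushTableProcedure: a single ~32 B cell keyed row/cf:cq is flushed from the memstore into a temporary HFile under .tmp/cf/ and committed into cf/ at sequenceid=5 (filesize=4.7 K). A hedged sketch of the client-side put-then-flush that produces this kind of trace; variable names and the cell value are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
    // Hedged sketch: write one cell row/cf:cq and ask the master to flush the
    // table, which is what the FlushTableProcedure entries above record.
    static void putAndFlush(Configuration conf) throws Exception {
        TableName name = TableName.valueOf("TestHBaseWalOnEC");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
            table.put(new Put(Bytes.toBytes("row"))
                    .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
            admin.flush(name); // drives FLUSH_TABLE_PREPARE -> FLUSH_TABLE_FLUSH_REGIONS
        }
    }
}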
2024-11-09T18:51:56,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-09T18:51:56,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45547 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-09T18:51:56,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45547 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T18:51:56,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-09T18:51:56,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 317 msec 2024-11-09T18:51:56,234 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 337 msec 2024-11-09T18:51:56,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=45547 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T18:51:56,532 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T18:51:56,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-09T18:51:56,547 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T18:51:56,547 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T18:51:56,551 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:51:56,552 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:51:56,552 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
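The "Shutting down minicluster" call stack above bottoms out in TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101), i.e. a JUnit after-hook invoking HBaseTestingUtil.shutdownMiniCluster(). A hedged sketch of that lifecycle shape; the field name, annotations, and cluster sizing are assumptions rather than the test's actual code:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class MiniClusterLifecycleSketch {
    // Hedged sketch of the lifecycle the call stack above ends in: tearDown()
    // calls HBaseTestingUtil.shutdownMiniCluster(), which closes the shared
    // async connection and then stops the HBase and HDFS mini clusters.
    private final HBaseTestingUtil util = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        util.startMiniCluster(); // the log suggests 3 region servers and 3 datanodes
    }

    @After
    public void tearDown() throws Exception {
        util.shutdownMiniCluster(); // -> shutdownMiniHBaseCluster() plus HDFS/ZK teardown
    }
}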
2024-11-09T18:51:56,552 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-09T18:51:56,552 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1407995813, stopped=false 2024-11-09T18:51:56,552 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=fb97eb0edbe8,45547,1731178310487 2024-11-09T18:51:56,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T18:51:56,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T18:51:56,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T18:51:56,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:56,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:56,632 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T18:51:56,633 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T18:51:56,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:56,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:56,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:51:56,633 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
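The ZKWatcher entries above show the master and every region server receiving a NodeDeleted event for /hbase/running on quorum 127.0.0.1:51638 and then re-setting a watch on the now-absent znode; deleting that znode is how the shutdown request is broadcast to the cluster. A hedged, generic ZooKeeper sketch of observing such a deletion; only the connect string and path are taken from the log, the class itself is illustrative:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatchSketch {
    // Hedged sketch: watch /hbase/running the way the ZKWatcher entries above
    // describe; a NodeDeleted event on it signals cluster shutdown.
    public static void main(String[] args) throws Exception {
        CountDownLatch deleted = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
            if (event.getType() == Watcher.Event.EventType.NodeDeleted
                    && "/hbase/running".equals(event.getPath())) {
                deleted.countDown();
            }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:51638", 30_000, watcher);
        zk.exists("/hbase/running", watcher); // sets the watch even if the znode is already gone
        deleted.await();                      // fires when the master deletes it on shutdown
        zk.close();
    }
}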
2024-11-09T18:51:56,633 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:51:56,633 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at 
org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T18:51:56,634 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:51:56,634 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:51:56,634 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fb97eb0edbe8,41163,1731178311407' ***** 2024-11-09T18:51:56,634 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fb97eb0edbe8,43101,1731178311267' ***** 2024-11-09T18:51:56,634 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(2210): STOPPED: Exiting; cluster shutdown set and not carrying any regions 2024-11-09T18:51:56,634 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T18:51:56,635 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T18:51:56,635 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:51:56,635 INFO [RS:0;fb97eb0edbe8:43101 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T18:51:56,635 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T18:51:56,635 INFO [RS:0;fb97eb0edbe8:43101 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T18:51:56,635 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer(959): stopping server fb97eb0edbe8,43101,1731178311267 2024-11-09T18:51:56,635 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T18:51:56,635 INFO [RS:0;fb97eb0edbe8:43101 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;fb97eb0edbe8:43101. 
2024-11-09T18:51:56,635 DEBUG [RS:0;fb97eb0edbe8:43101 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T18:51:56,635 DEBUG [RS:0;fb97eb0edbe8:43101 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:51:56,636 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fb97eb0edbe8,44469,1731178311459' ***** 2024-11-09T18:51:56,636 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T18:51:56,636 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer(976): stopping server fb97eb0edbe8,43101,1731178311267; all regions closed. 2024-11-09T18:51:56,636 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T18:51:56,636 INFO [RS:2;fb97eb0edbe8:44469 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T18:51:56,636 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T18:51:56,636 INFO [RS:2;fb97eb0edbe8:44469 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T18:51:56,637 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(3091): Received CLOSE for bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:56,637 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(959): stopping server fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:56,637 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T18:51:56,637 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T18:51:56,637 INFO [RS:1;fb97eb0edbe8:41163 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T18:51:56,637 INFO [RS:2;fb97eb0edbe8:44469 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;fb97eb0edbe8:44469. 2024-11-09T18:51:56,637 INFO [RS:1;fb97eb0edbe8:41163 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-09T18:51:56,637 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T18:51:56,637 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(959): stopping server fb97eb0edbe8,41163,1731178311407 2024-11-09T18:51:56,637 DEBUG [RS:2;fb97eb0edbe8:44469 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T18:51:56,637 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T18:51:56,638 DEBUG [RS:2;fb97eb0edbe8:44469 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:51:56,638 INFO [RS:1;fb97eb0edbe8:41163 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;fb97eb0edbe8:41163. 
2024-11-09T18:51:56,638 DEBUG [RS:1;fb97eb0edbe8:41163 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T18:51:56,638 DEBUG [RS:1;fb97eb0edbe8:41163 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:51:56,638 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T18:51:56,638 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(976): stopping server fb97eb0edbe8,41163,1731178311407; all regions closed. 2024-11-09T18:51:56,638 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T18:51:56,638 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T18:51:56,638 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-09T18:51:56,639 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing bc55f1a8ca42900d8121f8f9e47f01d4, disabling compactions & flushes 2024-11-09T18:51:56,639 INFO [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 2024-11-09T18:51:56,639 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 2024-11-09T18:51:56,639 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. after waiting 0 ms 2024-11-09T18:51:56,639 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 
2024-11-09T18:51:56,639 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-09T18:51:56,639 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T18:51:56,639 DEBUG [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(1325): Online Regions={bc55f1a8ca42900d8121f8f9e47f01d4=TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4., 1588230740=hbase:meta,,1.1588230740} 2024-11-09T18:51:56,640 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T18:51:56,640 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T18:51:56,640 DEBUG [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, bc55f1a8ca42900d8121f8f9e47f01d4 2024-11-09T18:51:56,640 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T18:51:56,640 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T18:51:56,640 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-09T18:51:56,644 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,43101,1731178311267/fb97eb0edbe8%2C43101%2C1731178311267.1731178314251 not finished, retry = 0 2024-11-09T18:51:56,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_1073741828_1018 (size=93) 2024-11-09T18:51:56,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_1073741828_1018 (size=93) 2024-11-09T18:51:56,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_1073741828_1018 (size=93) 2024-11-09T18:51:56,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_1073741827_1017 (size=93) 2024-11-09T18:51:56,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_1073741827_1017 (size=93) 2024-11-09T18:51:56,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_1073741827_1017 (size=93) 2024-11-09T18:51:56,655 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/WALs/fb97eb0edbe8,41163,1731178311407/fb97eb0edbe8%2C41163%2C1731178311407.1731178314251 not finished, retry = 0 2024-11-09T18:51:56,670 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/default/TestHBaseWalOnEC/bc55f1a8ca42900d8121f8f9e47f01d4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-09T18:51:56,673 INFO [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 2024-11-09T18:51:56,673 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for bc55f1a8ca42900d8121f8f9e47f01d4: Waiting for close lock at 1731178316638Running coprocessor pre-close hooks at 1731178316639 (+1 ms)Disabling compacts and flushes for region at 1731178316639Disabling writes for close at 1731178316639Writing region close event to WAL at 1731178316652 (+13 ms)Running coprocessor post-close hooks at 1731178316671 (+19 ms)Closed at 1731178316673 (+2 ms) 2024-11-09T18:51:56,674 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4. 2024-11-09T18:51:56,684 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/.tmp/info/61e0a495e8a0425d940f3104627aca8c is 153, key is TestHBaseWalOnEC,,1731178315198.bc55f1a8ca42900d8121f8f9e47f01d4./info:regioninfo/1731178315681/Put/seqid=0 2024-11-09T18:51:56,686 INFO [regionserver/fb97eb0edbe8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T18:51:56,687 INFO [regionserver/fb97eb0edbe8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T18:51:56,687 INFO [regionserver/fb97eb0edbe8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T18:51:56,688 WARN [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:56,688 WARN [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:56,696 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1362927919_22 at /127.0.0.1:46614 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:38899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46614 dst: /127.0.0.1:38899 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:56,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-09T18:51:56,703 WARN [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T18:51:56,703 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/.tmp/info/61e0a495e8a0425d940f3104627aca8c 2024-11-09T18:51:56,730 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/.tmp/ns/4ead77a3073441738c941f1ccb16cf1b is 43, key is default/ns:d/1731178314907/Put/seqid=0 2024-11-09T18:51:56,733 WARN [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:56,733 WARN [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:56,737 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1362927919_22 at /127.0.0.1:42882 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:43411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42882 dst: /127.0.0.1:43411 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:56,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-09T18:51:56,742 WARN [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T18:51:56,742 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/.tmp/ns/4ead77a3073441738c941f1ccb16cf1b 2024-11-09T18:51:56,751 DEBUG [RS:0;fb97eb0edbe8:43101 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/oldWALs 2024-11-09T18:51:56,751 INFO [RS:0;fb97eb0edbe8:43101 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fb97eb0edbe8%2C43101%2C1731178311267:(num 1731178314251) 2024-11-09T18:51:56,751 DEBUG [RS:0;fb97eb0edbe8:43101 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:51:56,751 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T18:51:56,751 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T18:51:56,752 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.ChoreService(370): Chore service for: regionserver/fb97eb0edbe8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T18:51:56,752 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T18:51:56,752 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T18:51:56,752 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T18:51:56,752 INFO [regionserver/fb97eb0edbe8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
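The repeated hdfs.DFSStripedOutputStream warnings above ("Cannot allocate parity block(index=3/4, policy=RS-3-2-1024k)") are expected on this topology: RS-3-2 places each block group across five datanodes (3 data + 2 parity), but this mini-cluster runs only three, so the parity cells at indices 3 and 4 cannot be placed and each group is written without them, which is also what triggers the follow-up "Block group <1> failed to write 2 blocks" warning. The log's own suggestion is to run 'hdfs ec -verifyClusterSetup'. For reference only (not part of the test), the effective policy on a directory can also be inspected through the HDFS client API; the NameNode address is taken from the log, the path is illustrative, and the rest is a minimal sketch assuming a Hadoop 3.x client on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Point at the test NameNode seen in the log (hdfs://localhost:34993).
        conf.set("fs.defaultFS", "hdfs://localhost:34993");
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path dir = new Path("/user/jenkins/test-data"); // illustrative path
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          if (policy == null) {
            System.out.println(dir + " uses plain replication (no EC policy set)");
          } else {
            // For RS-3-2-1024k this prints 3 data units + 2 parity units,
            // i.e. a full block group needs at least 5 datanodes.
            System.out.printf("%s: policy=%s, data=%d, parity=%d%n",
                dir, policy.getName(), policy.getNumDataUnits(), policy.getNumParityUnits());
          }
        }
      }
    }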
2024-11-09T18:51:56,752 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T18:51:56,752 INFO [RS:0;fb97eb0edbe8:43101 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:43101 2024-11-09T18:51:56,758 DEBUG [RS:1;fb97eb0edbe8:41163 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/oldWALs 2024-11-09T18:51:56,759 INFO [RS:1;fb97eb0edbe8:41163 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fb97eb0edbe8%2C41163%2C1731178311407:(num 1731178314251) 2024-11-09T18:51:56,759 DEBUG [RS:1;fb97eb0edbe8:41163 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:51:56,759 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T18:51:56,759 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T18:51:56,759 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.ChoreService(370): Chore service for: regionserver/fb97eb0edbe8:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T18:51:56,759 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T18:51:56,759 INFO [regionserver/fb97eb0edbe8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T18:51:56,759 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T18:51:56,759 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-09T18:51:56,760 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T18:51:56,760 INFO [RS:1;fb97eb0edbe8:41163 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:41163 2024-11-09T18:51:56,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fb97eb0edbe8,43101,1731178311267 2024-11-09T18:51:56,764 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T18:51:56,764 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T18:51:56,774 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fb97eb0edbe8,41163,1731178311407 2024-11-09T18:51:56,774 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T18:51:56,776 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/.tmp/table/ef5faaa400024cfaa83e8a3c463f7ae9 is 52, key is TestHBaseWalOnEC/table:state/1731178315697/Put/seqid=0 2024-11-09T18:51:56,778 WARN [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:56,778 WARN [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:56,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1362927919_22 at /127.0.0.1:46630 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:38899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46630 dst: /127.0.0.1:38899 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:56,785 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fb97eb0edbe8,41163,1731178311407] 2024-11-09T18:51:56,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-09T18:51:56,791 WARN [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T18:51:56,791 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/.tmp/table/ef5faaa400024cfaa83e8a3c463f7ae9 2024-11-09T18:51:56,805 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/.tmp/info/61e0a495e8a0425d940f3104627aca8c as hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/info/61e0a495e8a0425d940f3104627aca8c 2024-11-09T18:51:56,806 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fb97eb0edbe8,41163,1731178311407 already deleted, retry=false 2024-11-09T18:51:56,806 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fb97eb0edbe8,41163,1731178311407 expired; onlineServers=2 2024-11-09T18:51:56,806 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fb97eb0edbe8,43101,1731178311267] 2024-11-09T18:51:56,816 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fb97eb0edbe8,43101,1731178311267 already deleted, retry=false 2024-11-09T18:51:56,816 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fb97eb0edbe8,43101,1731178311267 expired; onlineServers=1 2024-11-09T18:51:56,819 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/info/61e0a495e8a0425d940f3104627aca8c, entries=10, sequenceid=11, filesize=6.5 K 2024-11-09T18:51:56,821 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/.tmp/ns/4ead77a3073441738c941f1ccb16cf1b as 
hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/ns/4ead77a3073441738c941f1ccb16cf1b 2024-11-09T18:51:56,838 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/ns/4ead77a3073441738c941f1ccb16cf1b, entries=2, sequenceid=11, filesize=5.0 K 2024-11-09T18:51:56,840 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/.tmp/table/ef5faaa400024cfaa83e8a3c463f7ae9 as hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/table/ef5faaa400024cfaa83e8a3c463f7ae9 2024-11-09T18:51:56,840 DEBUG [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-09T18:51:56,851 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/table/ef5faaa400024cfaa83e8a3c463f7ae9, entries=2, sequenceid=11, filesize=5.1 K 2024-11-09T18:51:56,853 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 212ms, sequenceid=11, compaction requested=false 2024-11-09T18:51:56,853 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-09T18:51:56,862 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-09T18:51:56,863 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T18:51:56,863 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T18:51:56,864 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731178316639Running coprocessor pre-close hooks at 1731178316639Disabling compacts and flushes for region at 1731178316639Disabling writes for close at 1731178316640 (+1 ms)Obtaining lock to block concurrent updates at 1731178316640Preparing flush snapshotting stores in 1588230740 at 1731178316640Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731178316641 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731178316647 (+6 ms)Flushing 1588230740/info: creating writer at 1731178316648 (+1 ms)Flushing 1588230740/info: appending metadata at 1731178316679 (+31 ms)Flushing 1588230740/info: closing flushed file at 1731178316679Flushing 
1588230740/ns: creating writer at 1731178316714 (+35 ms)Flushing 1588230740/ns: appending metadata at 1731178316729 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731178316729Flushing 1588230740/table: creating writer at 1731178316751 (+22 ms)Flushing 1588230740/table: appending metadata at 1731178316774 (+23 ms)Flushing 1588230740/table: closing flushed file at 1731178316774Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a3948ac: reopening flushed file at 1731178316803 (+29 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1062e4be: reopening flushed file at 1731178316819 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@106d2983: reopening flushed file at 1731178316838 (+19 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 212ms, sequenceid=11, compaction requested=false at 1731178316853 (+15 ms)Writing region close event to WAL at 1731178316855 (+2 ms)Running coprocessor post-close hooks at 1731178316863 (+8 ms)Closed at 1731178316863 2024-11-09T18:51:56,864 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-09T18:51:56,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:51:56,886 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43101-0x10120f0a3f90001, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:51:56,886 INFO [RS:0;fb97eb0edbe8:43101 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T18:51:56,886 INFO [RS:0;fb97eb0edbe8:43101 {}] regionserver.HRegionServer(1031): Exiting; stopping=fb97eb0edbe8,43101,1731178311267; zookeeper connection closed. 2024-11-09T18:51:56,886 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@56091ebf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@56091ebf 2024-11-09T18:51:56,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:51:56,895 INFO [RS:1;fb97eb0edbe8:41163 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T18:51:56,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41163-0x10120f0a3f90002, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:51:56,895 INFO [RS:1;fb97eb0edbe8:41163 {}] regionserver.HRegionServer(1031): Exiting; stopping=fb97eb0edbe8,41163,1731178311407; zookeeper connection closed. 2024-11-09T18:51:56,896 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@fd4ea40 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@fd4ea40 2024-11-09T18:51:57,040 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(976): stopping server fb97eb0edbe8,44469,1731178311459; all regions closed. 
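The "Region close journal" entry above compresses the whole close/flush timeline for region 1588230740 into one string of "step at <epoch millis> (+delta ms)" segments. Splitting it into per-step offsets makes such journals easier to scan by hand; the small parser below is an editorial illustration only (the class name and regex are not part of HBase) and assumes the journal format seen in this log:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class CloseJournalSplitter {
      // Each step ends with " at <13-digit epoch millis>", optionally followed by " (+N ms)".
      private static final Pattern STEP =
          Pattern.compile("(.+?) at (\\d{13})(?: \\(\\+(\\d+) ms\\))?");

      public static void main(String[] args) {
        // Shortened sample taken from the journal above.
        String journal = "Waiting for close lock at 1731178316639"
            + "Running coprocessor pre-close hooks at 1731178316639"
            + "Disabling writes for close at 1731178316640 (+1 ms)"
            + "Writing region close event to WAL at 1731178316855 (+2 ms)"
            + "Closed at 1731178316863";
        Matcher m = STEP.matcher(journal);
        long start = -1;
        while (m.find()) {
          long ts = Long.parseLong(m.group(2));
          if (start < 0) start = ts;
          // Print each step with its offset from the first recorded timestamp.
          System.out.printf("+%4d ms  %s%n", ts - start, m.group(1).trim());
        }
      }
    }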
2024-11-09T18:51:57,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_1073741829_1019 (size=2751) 2024-11-09T18:51:57,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_1073741829_1019 (size=2751) 2024-11-09T18:51:57,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_1073741829_1019 (size=2751) 2024-11-09T18:51:57,047 DEBUG [RS:2;fb97eb0edbe8:44469 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/oldWALs 2024-11-09T18:51:57,047 INFO [RS:2;fb97eb0edbe8:44469 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fb97eb0edbe8%2C44469%2C1731178311459.meta:.meta(num 1731178314709) 2024-11-09T18:51:57,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_1073741826_1016 (size=1298) 2024-11-09T18:51:57,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_1073741826_1016 (size=1298) 2024-11-09T18:51:57,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_1073741826_1016 (size=1298) 2024-11-09T18:51:57,054 DEBUG [RS:2;fb97eb0edbe8:44469 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/oldWALs 2024-11-09T18:51:57,054 INFO [RS:2;fb97eb0edbe8:44469 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL fb97eb0edbe8%2C44469%2C1731178311459:(num 1731178314250) 2024-11-09T18:51:57,054 DEBUG [RS:2;fb97eb0edbe8:44469 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:51:57,054 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T18:51:57,054 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T18:51:57,054 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.ChoreService(370): Chore service for: regionserver/fb97eb0edbe8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T18:51:57,054 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T18:51:57,054 INFO [regionserver/fb97eb0edbe8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T18:51:57,054 INFO [RS:2;fb97eb0edbe8:44469 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44469 2024-11-09T18:51:57,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T18:51:57,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fb97eb0edbe8,44469,1731178311459 2024-11-09T18:51:57,064 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T18:51:57,064 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007f69c08f5140@4dbdd3dc rejected from java.util.concurrent.ThreadPoolExecutor@8bf440f[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-09T18:51:57,074 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fb97eb0edbe8,44469,1731178311459] 2024-11-09T18:51:57,084 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fb97eb0edbe8,44469,1731178311459 already deleted, retry=false 2024-11-09T18:51:57,085 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fb97eb0edbe8,44469,1731178311459 expired; onlineServers=0 2024-11-09T18:51:57,085 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'fb97eb0edbe8,45547,1731178310487' ***** 2024-11-09T18:51:57,085 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-09T18:51:57,085 INFO [M:0;fb97eb0edbe8:45547 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T18:51:57,085 INFO [M:0;fb97eb0edbe8:45547 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T18:51:57,085 DEBUG [M:0;fb97eb0edbe8:45547 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-09T18:51:57,085 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
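The ERROR from zookeeper.ClientCnxn$EventThread above is a shutdown-ordering artifact rather than a data problem: by the time the final NodeDeleted notification arrives, the executor that ZKWatcher.process() forwards events to has already terminated, so the task is rejected with a RejectedExecutionException. The snippet below is a generic illustration of that pattern (it is not the ZKWatcher implementation): a submission to an already shut-down pool throws, and the late event can simply be logged and dropped:

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.TimeUnit;

    public class LateEventDemo {
      public static void main(String[] args) throws InterruptedException {
        ExecutorService pool = Executors.newSingleThreadExecutor();
        pool.submit(() -> System.out.println("event #1 processed"));
        pool.shutdown();
        pool.awaitTermination(5, TimeUnit.SECONDS);

        // A notification arriving after shutdown is rejected; dropping it with a
        // log line mirrors what happens at the tail end of the shutdown above.
        try {
          pool.submit(() -> System.out.println("late event"));
        } catch (RejectedExecutionException e) {
          System.out.println("executor already terminated, dropping late event: " + e);
        }
      }
    }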
2024-11-09T18:51:57,085 DEBUG [M:0;fb97eb0edbe8:45547 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-09T18:51:57,085 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.large.0-1731178313861 {}] cleaner.HFileCleaner(306): Exit Thread[master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.large.0-1731178313861,5,FailOnTimeoutGroup] 2024-11-09T18:51:57,085 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.small.0-1731178313862 {}] cleaner.HFileCleaner(306): Exit Thread[master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.small.0-1731178313862,5,FailOnTimeoutGroup] 2024-11-09T18:51:57,086 INFO [M:0;fb97eb0edbe8:45547 {}] hbase.ChoreService(370): Chore service for: master/fb97eb0edbe8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-09T18:51:57,086 INFO [M:0;fb97eb0edbe8:45547 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T18:51:57,086 DEBUG [M:0;fb97eb0edbe8:45547 {}] master.HMaster(1795): Stopping service threads 2024-11-09T18:51:57,086 INFO [M:0;fb97eb0edbe8:45547 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-09T18:51:57,086 INFO [M:0;fb97eb0edbe8:45547 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T18:51:57,087 INFO [M:0;fb97eb0edbe8:45547 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-09T18:51:57,087 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-09T18:51:57,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-09T18:51:57,095 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:51:57,095 DEBUG [M:0;fb97eb0edbe8:45547 {}] zookeeper.ZKUtil(347): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-09T18:51:57,095 WARN [M:0;fb97eb0edbe8:45547 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-09T18:51:57,097 INFO [M:0;fb97eb0edbe8:45547 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/.lastflushedseqids 2024-11-09T18:51:57,108 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:57,108 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-09T18:51:57,110 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:46648 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:38899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46648 dst: /127.0.0.1:38899 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:57,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-09T18:51:57,115 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T18:51:57,115 INFO [M:0;fb97eb0edbe8:45547 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-09T18:51:57,115 INFO [M:0;fb97eb0edbe8:45547 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-09T18:51:57,115 DEBUG [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T18:51:57,116 INFO [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:51:57,116 DEBUG [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:51:57,116 DEBUG [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T18:51:57,116 DEBUG [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T18:51:57,116 INFO [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-09T18:51:57,138 DEBUG [M:0;fb97eb0edbe8:45547 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/acd3fbddaed145f4a12f6e586b74d3be is 82, key is hbase:meta,,1/info:regioninfo/1731178314814/Put/seqid=0 2024-11-09T18:51:57,144 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:57,144 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:57,148 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:42894 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:43411:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42894 dst: /127.0.0.1:43411 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:57,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-09T18:51:57,154 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-09T18:51:57,154 INFO [M:0;fb97eb0edbe8:45547 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/acd3fbddaed145f4a12f6e586b74d3be 2024-11-09T18:51:57,174 INFO [RS:2;fb97eb0edbe8:44469 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T18:51:57,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:51:57,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44469-0x10120f0a3f90003, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:51:57,174 INFO [RS:2;fb97eb0edbe8:44469 {}] regionserver.HRegionServer(1031): Exiting; stopping=fb97eb0edbe8,44469,1731178311459; zookeeper connection closed. 2024-11-09T18:51:57,175 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6d5e312 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6d5e312 2024-11-09T18:51:57,175 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-09T18:51:57,178 DEBUG [M:0;fb97eb0edbe8:45547 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ff63c20d2b504cc7a4c2c4d205319317 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731178315705/Put/seqid=0 2024-11-09T18:51:57,180 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:57,180 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:57,184 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:56094 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:44911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56094 dst: /127.0.0.1:44911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:57,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_-9223372036854775552_1037 (size=6440) 2024-11-09T18:51:57,189 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T18:51:57,189 INFO [M:0;fb97eb0edbe8:45547 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ff63c20d2b504cc7a4c2c4d205319317 2024-11-09T18:51:57,213 DEBUG [M:0;fb97eb0edbe8:45547 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/84e722b67d78425d9cccfbd4c6250ec2 is 69, key is fb97eb0edbe8,41163,1731178311407/rs:state/1731178313945/Put/seqid=0 2024-11-09T18:51:57,216 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:57,216 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-09T18:51:57,219 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-682437310_22 at /127.0.0.1:56112 [Receiving block BP-1136675770-172.17.0.3-1731178304792:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:44911:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56112 dst: /127.0.0.1:44911 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-09T18:51:57,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-09T18:51:57,224 WARN [M:0;fb97eb0edbe8:45547 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-09T18:51:57,224 INFO [M:0;fb97eb0edbe8:45547 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/84e722b67d78425d9cccfbd4c6250ec2 2024-11-09T18:51:57,233 DEBUG [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/acd3fbddaed145f4a12f6e586b74d3be as hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/acd3fbddaed145f4a12f6e586b74d3be 2024-11-09T18:51:57,242 INFO [M:0;fb97eb0edbe8:45547 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/acd3fbddaed145f4a12f6e586b74d3be, entries=8, sequenceid=72, filesize=5.5 K 2024-11-09T18:51:57,244 DEBUG [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/ff63c20d2b504cc7a4c2c4d205319317 as hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ff63c20d2b504cc7a4c2c4d205319317 2024-11-09T18:51:57,252 INFO [M:0;fb97eb0edbe8:45547 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/ff63c20d2b504cc7a4c2c4d205319317, entries=8, sequenceid=72, filesize=6.3 K 2024-11-09T18:51:57,253 DEBUG [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/84e722b67d78425d9cccfbd4c6250ec2 as hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/84e722b67d78425d9cccfbd4c6250ec2 2024-11-09T18:51:57,260 INFO [M:0;fb97eb0edbe8:45547 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/84e722b67d78425d9cccfbd4c6250ec2, entries=3, sequenceid=72, filesize=5.2 K 2024-11-09T18:51:57,262 INFO [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=72, compaction requested=false 2024-11-09T18:51:57,263 INFO [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:51:57,263 DEBUG [M:0;fb97eb0edbe8:45547 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731178317115Disabling compacts and flushes for region at 1731178317115Disabling writes for close at 1731178317116 (+1 ms)Obtaining lock to block concurrent updates at 1731178317116Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731178317116Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731178317116Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731178317117 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731178317117Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731178317138 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731178317138Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731178317163 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731178317178 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731178317178Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731178317198 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731178317213 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731178317213Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46b31e47: reopening flushed file at 1731178317231 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1629e982: reopening flushed file at 1731178317242 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41476f50: reopening flushed file at 1731178317252 (+10 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 145ms, sequenceid=72, compaction requested=false at 1731178317262 (+10 ms)Writing region close event to WAL at 1731178317263 (+1 ms)Closed at 1731178317263 2024-11-09T18:51:57,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38899 is added to blk_1073741825_1011 (size=32686) 2024-11-09T18:51:57,266 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/MasterData/WALs/fb97eb0edbe8,45547,1731178310487/fb97eb0edbe8%2C45547%2C1731178310487.1731178312103 not finished, retry = 0 2024-11-09T18:51:57,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44911 is added to blk_1073741825_1011 (size=32686) 2024-11-09T18:51:57,267 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43411 is added to blk_1073741825_1011 (size=32686) 2024-11-09T18:51:57,368 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T18:51:57,368 INFO [M:0;fb97eb0edbe8:45547 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-09T18:51:57,368 INFO [M:0;fb97eb0edbe8:45547 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45547 2024-11-09T18:51:57,368 INFO [M:0;fb97eb0edbe8:45547 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T18:51:57,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:51:57,480 INFO [M:0;fb97eb0edbe8:45547 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T18:51:57,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45547-0x10120f0a3f90000, quorum=127.0.0.1:51638, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:51:57,512 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@554ba3d5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T18:51:57,515 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64a37729{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T18:51:57,515 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T18:51:57,515 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3891561d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T18:51:57,516 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1646e48a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.log.dir/,STOPPED} 2024-11-09T18:51:57,518 WARN [BP-1136675770-172.17.0.3-1731178304792 heartbeating to localhost/127.0.0.1:34993 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T18:51:57,518 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T18:51:57,518 WARN [BP-1136675770-172.17.0.3-1731178304792 heartbeating to localhost/127.0.0.1:34993 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1136675770-172.17.0.3-1731178304792 (Datanode Uuid e61bdbdf-bfb1-4797-92c3-bd42021f1e5a) service to localhost/127.0.0.1:34993 2024-11-09T18:51:57,518 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T18:51:57,519 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data5/current/BP-1136675770-172.17.0.3-1731178304792 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T18:51:57,519 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data6/current/BP-1136675770-172.17.0.3-1731178304792 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T18:51:57,520 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T18:51:57,522 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1327a94d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T18:51:57,522 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@674554fc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T18:51:57,522 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T18:51:57,522 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19093484{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T18:51:57,522 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56f2bf79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.log.dir/,STOPPED} 2024-11-09T18:51:57,525 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T18:51:57,525 WARN [BP-1136675770-172.17.0.3-1731178304792 heartbeating to localhost/127.0.0.1:34993 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T18:51:57,525 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T18:51:57,525 WARN [BP-1136675770-172.17.0.3-1731178304792 heartbeating to localhost/127.0.0.1:34993 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1136675770-172.17.0.3-1731178304792 (Datanode Uuid 4b63c7ba-9242-405d-b626-e72e5113f66b) service to localhost/127.0.0.1:34993 2024-11-09T18:51:57,525 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data3/current/BP-1136675770-172.17.0.3-1731178304792 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T18:51:57,526 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data4/current/BP-1136675770-172.17.0.3-1731178304792 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T18:51:57,526 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T18:51:57,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ec777b6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T18:51:57,528 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@145f251e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T18:51:57,528 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T18:51:57,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e9e5394{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T18:51:57,528 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a55babc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.log.dir/,STOPPED} 2024-11-09T18:51:57,530 WARN [BP-1136675770-172.17.0.3-1731178304792 heartbeating to localhost/127.0.0.1:34993 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T18:51:57,530 WARN [BP-1136675770-172.17.0.3-1731178304792 heartbeating to localhost/127.0.0.1:34993 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1136675770-172.17.0.3-1731178304792 (Datanode Uuid a6ad5aa7-2e63-4e0b-9b22-816e51f14e8f) service to localhost/127.0.0.1:34993 2024-11-09T18:51:57,530 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T18:51:57,530 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-09T18:51:57,531 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data1/current/BP-1136675770-172.17.0.3-1731178304792 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T18:51:57,531 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/cluster_15bf9a5a-0fe9-fe1c-8b43-493ac117bdc2/data/data2/current/BP-1136675770-172.17.0.3-1731178304792 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-09T18:51:57,532 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-09T18:51:57,541 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44270346{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T18:51:57,542 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11292817{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T18:51:57,542 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T18:51:57,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2566da3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T18:51:57,542 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@346b353e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.log.dir/,STOPPED} 2024-11-09T18:51:57,550 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-09T18:51:57,579 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-09T18:51:57,586 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=87 (was 160), OpenFileDescriptor=439 (was 393) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=165 (was 151) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=5040 (was 5512) 2024-11-09T18:51:57,592 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=87, OpenFileDescriptor=439, MaxFileDescriptor=1048576, SystemLoadAverage=165, ProcessCount=11, AvailableMemoryMB=5039 2024-11-09T18:51:57,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-09T18:51:57,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.log.dir so I do NOT create it in target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04 2024-11-09T18:51:57,592 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/6481d617-8c60-a2dd-f626-b815128a37e4/hadoop.tmp.dir so I do NOT create it in target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04 2024-11-09T18:51:57,593 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c, deleteOnExit=true 2024-11-09T18:51:57,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-09T18:51:57,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/test.cache.data in system properties and HBase conf 2024-11-09T18:51:57,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/hadoop.tmp.dir in system properties and HBase conf 2024-11-09T18:51:57,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/hadoop.log.dir in system properties and HBase conf 2024-11-09T18:51:57,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-09T18:51:57,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-09T18:51:57,593 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-09T18:51:57,593 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-09T18:51:57,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-09T18:51:57,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-09T18:51:57,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-09T18:51:57,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T18:51:57,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-09T18:51:57,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-09T18:51:57,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-09T18:51:57,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T18:51:57,594 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-09T18:51:57,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/nfs.dump.dir in system properties and HBase conf 2024-11-09T18:51:57,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/java.io.tmpdir in system properties and HBase conf 2024-11-09T18:51:57,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-09T18:51:57,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-09T18:51:57,595 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-09T18:51:57,992 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T18:51:57,997 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T18:51:57,999 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T18:51:57,999 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T18:51:57,999 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T18:51:58,000 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T18:51:58,000 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28c24cda{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/hadoop.log.dir/,AVAILABLE} 2024-11-09T18:51:58,000 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1537a16f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T18:51:58,098 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9f29b33{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/java.io.tmpdir/jetty-localhost-37141-hadoop-hdfs-3_4_1-tests_jar-_-any-7021003212207680432/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-09T18:51:58,098 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a7ed972{HTTP/1.1, (http/1.1)}{localhost:37141} 2024-11-09T18:51:58,098 INFO [Time-limited test {}] server.Server(415): Started @15630ms 2024-11-09T18:51:58,415 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T18:51:58,419 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T18:51:58,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T18:51:58,420 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T18:51:58,420 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-09T18:51:58,421 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51329d3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/hadoop.log.dir/,AVAILABLE} 2024-11-09T18:51:58,421 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@25117e55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T18:51:58,518 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ed60080{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/java.io.tmpdir/jetty-localhost-42021-hadoop-hdfs-3_4_1-tests_jar-_-any-10317237427180807630/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T18:51:58,518 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1f9b01b{HTTP/1.1, (http/1.1)}{localhost:42021} 2024-11-09T18:51:58,518 INFO [Time-limited test {}] server.Server(415): Started @16050ms 2024-11-09T18:51:58,520 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T18:51:58,554 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T18:51:58,558 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T18:51:58,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T18:51:58,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T18:51:58,559 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T18:51:58,559 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@134b78f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/hadoop.log.dir/,AVAILABLE} 2024-11-09T18:51:58,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@436754c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T18:51:58,658 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@627fdc4a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/java.io.tmpdir/jetty-localhost-44661-hadoop-hdfs-3_4_1-tests_jar-_-any-11942416194244919597/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T18:51:58,658 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1781e9e7{HTTP/1.1, (http/1.1)}{localhost:44661} 2024-11-09T18:51:58,658 INFO [Time-limited test {}] server.Server(415): Started @16190ms 2024-11-09T18:51:58,660 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T18:51:58,693 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-09T18:51:58,696 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-09T18:51:58,697 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-09T18:51:58,697 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-09T18:51:58,697 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-09T18:51:58,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46e75277{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/hadoop.log.dir/,AVAILABLE} 2024-11-09T18:51:58,698 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2924ad14{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-09T18:51:58,811 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55a80442{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/java.io.tmpdir/jetty-localhost-42349-hadoop-hdfs-3_4_1-tests_jar-_-any-8857267558610645241/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T18:51:58,811 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d3aadcd{HTTP/1.1, (http/1.1)}{localhost:42349} 2024-11-09T18:51:58,811 INFO [Time-limited test {}] server.Server(415): Started @16343ms 2024-11-09T18:51:58,813 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-09T18:51:59,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T18:51:59,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T18:51:59,758 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T18:52:00,262 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-09T18:52:00,270 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T18:52:00,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T18:52:00,324 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-09T18:52:00,544 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data1/current/BP-18283849-172.17.0.3-1731178317626/current, will proceed with Du for space computation calculation, 2024-11-09T18:52:00,544 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data2/current/BP-18283849-172.17.0.3-1731178317626/current, will proceed with Du for space computation calculation, 2024-11-09T18:52:00,559 WARN [Thread-502 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T18:52:00,563 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b891c7b2a7af9be with lease ID 0xb081f5612841a4e0: Processing first storage report for DS-31285ed1-433d-41f5-adbd-40d54c657d18 from datanode DatanodeRegistration(127.0.0.1:40859, datanodeUuid=393b14e2-e9cc-4e26-ac84-2f0c1a06ad1a, infoPort=40147, infoSecurePort=0, ipcPort=41079, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626) 2024-11-09T18:52:00,563 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b891c7b2a7af9be with lease ID 0xb081f5612841a4e0: from storage DS-31285ed1-433d-41f5-adbd-40d54c657d18 node DatanodeRegistration(127.0.0.1:40859, datanodeUuid=393b14e2-e9cc-4e26-ac84-2f0c1a06ad1a, infoPort=40147, infoSecurePort=0, ipcPort=41079, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T18:52:00,563 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b891c7b2a7af9be with lease ID 0xb081f5612841a4e0: Processing first storage report for DS-467d08f2-524b-41e1-a0b7-478a0112e661 from datanode DatanodeRegistration(127.0.0.1:40859, datanodeUuid=393b14e2-e9cc-4e26-ac84-2f0c1a06ad1a, infoPort=40147, infoSecurePort=0, ipcPort=41079, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626) 2024-11-09T18:52:00,563 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b891c7b2a7af9be with lease ID 0xb081f5612841a4e0: from storage DS-467d08f2-524b-41e1-a0b7-478a0112e661 node DatanodeRegistration(127.0.0.1:40859, datanodeUuid=393b14e2-e9cc-4e26-ac84-2f0c1a06ad1a, infoPort=40147, infoSecurePort=0, ipcPort=41079, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T18:52:01,020 WARN [Thread-574 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data3/current/BP-18283849-172.17.0.3-1731178317626/current, will proceed with Du for space computation calculation, 2024-11-09T18:52:01,020 WARN [Thread-575 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data4/current/BP-18283849-172.17.0.3-1731178317626/current, will proceed with Du for space computation calculation, 2024-11-09T18:52:01,035 WARN [Thread-525 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-09T18:52:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59232d2a472f7688 with lease ID 0xb081f5612841a4e1: Processing first storage report for DS-333307cc-23b3-41d4-87df-0fb502f1e066 from datanode DatanodeRegistration(127.0.0.1:38923, datanodeUuid=4d8a22e4-73ab-4a1a-9f5a-e95024b543ea, infoPort=33711, infoSecurePort=0, ipcPort=46077, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626) 2024-11-09T18:52:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59232d2a472f7688 with lease ID 0xb081f5612841a4e1: from storage DS-333307cc-23b3-41d4-87df-0fb502f1e066 node DatanodeRegistration(127.0.0.1:38923, datanodeUuid=4d8a22e4-73ab-4a1a-9f5a-e95024b543ea, infoPort=33711, infoSecurePort=0, ipcPort=46077, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T18:52:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x59232d2a472f7688 with lease ID 0xb081f5612841a4e1: Processing first storage report for DS-f79e6fc9-b647-4418-ac52-aaa23bb094ec from datanode DatanodeRegistration(127.0.0.1:38923, datanodeUuid=4d8a22e4-73ab-4a1a-9f5a-e95024b543ea, infoPort=33711, infoSecurePort=0, ipcPort=46077, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626) 2024-11-09T18:52:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x59232d2a472f7688 with lease ID 0xb081f5612841a4e1: from storage DS-f79e6fc9-b647-4418-ac52-aaa23bb094ec node DatanodeRegistration(127.0.0.1:38923, datanodeUuid=4d8a22e4-73ab-4a1a-9f5a-e95024b543ea, infoPort=33711, infoSecurePort=0, ipcPort=46077, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T18:52:01,133 WARN [Thread-585 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data5/current/BP-18283849-172.17.0.3-1731178317626/current, will proceed with Du for space computation calculation, 2024-11-09T18:52:01,133 WARN [Thread-586 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data6/current/BP-18283849-172.17.0.3-1731178317626/current, will proceed with Du for space computation calculation, 2024-11-09T18:52:01,148 WARN [Thread-547 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-09T18:52:01,151 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4cb97a938ec87a36 with lease ID 0xb081f5612841a4e2: Processing first storage report for DS-c86789dc-a60c-4564-abf0-e7f6dd96c0ee from datanode DatanodeRegistration(127.0.0.1:45411, datanodeUuid=042ed729-ba26-4c21-ad70-1cb667b29c16, infoPort=36383, infoSecurePort=0, ipcPort=41669, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626) 2024-11-09T18:52:01,151 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4cb97a938ec87a36 with lease ID 0xb081f5612841a4e2: from storage DS-c86789dc-a60c-4564-abf0-e7f6dd96c0ee node DatanodeRegistration(127.0.0.1:45411, datanodeUuid=042ed729-ba26-4c21-ad70-1cb667b29c16, infoPort=36383, infoSecurePort=0, ipcPort=41669, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-09T18:52:01,151 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4cb97a938ec87a36 with lease ID 0xb081f5612841a4e2: Processing first storage report for DS-3591335f-3124-461a-8ade-1052284af104 from datanode DatanodeRegistration(127.0.0.1:45411, datanodeUuid=042ed729-ba26-4c21-ad70-1cb667b29c16, infoPort=36383, infoSecurePort=0, ipcPort=41669, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626) 2024-11-09T18:52:01,151 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4cb97a938ec87a36 with lease ID 0xb081f5612841a4e2: from storage DS-3591335f-3124-461a-8ade-1052284af104 node DatanodeRegistration(127.0.0.1:45411, datanodeUuid=042ed729-ba26-4c21-ad70-1cb667b29c16, infoPort=36383, infoSecurePort=0, ipcPort=41669, storageInfo=lv=-57;cid=testClusterID;nsid=962602856;c=1731178317626), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-09T18:52:01,158 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04 2024-11-09T18:52:01,161 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/zookeeper_0, clientPort=57051, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-09T18:52:01,162 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57051 2024-11-09T18:52:01,162 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:52:01,164 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:52:01,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741825_1001 (size=7) 2024-11-09T18:52:01,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741825_1001 (size=7) 2024-11-09T18:52:01,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741825_1001 (size=7) 2024-11-09T18:52:01,180 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23 with version=8 2024-11-09T18:52:01,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:34993/user/jenkins/test-data/b7763779-13bf-7060-87ad-44990076340b/hbase-staging 2024-11-09T18:52:01,183 INFO [Time-limited test {}] client.ConnectionUtils(128): master/fb97eb0edbe8:0 server-side Connection retries=45 2024-11-09T18:52:01,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,183 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T18:52:01,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,183 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T18:52:01,183 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-09T18:52:01,183 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T18:52:01,184 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45825 2024-11-09T18:52:01,186 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45825 connecting to ZooKeeper ensemble=127.0.0.1:57051 2024-11-09T18:52:01,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:458250x0, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T18:52:01,242 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45825-0x10120f0d0b40000 connected 2024-11-09T18:52:01,327 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:52:01,329 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:52:01,331 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:52:01,331 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23, hbase.cluster.distributed=false 2024-11-09T18:52:01,334 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T18:52:01,335 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45825 2024-11-09T18:52:01,335 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45825 2024-11-09T18:52:01,336 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45825 2024-11-09T18:52:01,337 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45825 2024-11-09T18:52:01,337 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45825 2024-11-09T18:52:01,352 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fb97eb0edbe8:0 server-side Connection retries=45 2024-11-09T18:52:01,352 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,352 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,352 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T18:52:01,352 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,352 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T18:52:01,352 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T18:52:01,352 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T18:52:01,353 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:33623 2024-11-09T18:52:01,355 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33623 connecting to ZooKeeper ensemble=127.0.0.1:57051 2024-11-09T18:52:01,356 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:52:01,358 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:52:01,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:336230x0, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T18:52:01,375 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:52:01,375 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33623-0x10120f0d0b40001 connected 2024-11-09T18:52:01,375 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T18:52:01,376 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T18:52:01,377 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T18:52:01,379 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T18:52:01,383 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33623 2024-11-09T18:52:01,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33623 2024-11-09T18:52:01,384 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33623 2024-11-09T18:52:01,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33623 2024-11-09T18:52:01,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33623 2024-11-09T18:52:01,404 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fb97eb0edbe8:0 server-side Connection retries=45 2024-11-09T18:52:01,404 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,405 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,405 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T18:52:01,405 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,405 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T18:52:01,405 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T18:52:01,405 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T18:52:01,406 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35949 2024-11-09T18:52:01,407 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35949 connecting to ZooKeeper ensemble=127.0.0.1:57051 2024-11-09T18:52:01,408 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:52:01,409 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:52:01,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:359490x0, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T18:52:01,422 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:52:01,422 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35949-0x10120f0d0b40002 connected 2024-11-09T18:52:01,422 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T18:52:01,423 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T18:52:01,424 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T18:52:01,425 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T18:52:01,426 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35949 2024-11-09T18:52:01,426 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35949 2024-11-09T18:52:01,426 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35949 2024-11-09T18:52:01,427 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35949 2024-11-09T18:52:01,427 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35949 2024-11-09T18:52:01,440 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/fb97eb0edbe8:0 server-side Connection retries=45 2024-11-09T18:52:01,441 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,441 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,441 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-09T18:52:01,441 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-09T18:52:01,441 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-09T18:52:01,441 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-09T18:52:01,441 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-09T18:52:01,442 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:35061 2024-11-09T18:52:01,443 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35061 connecting to ZooKeeper ensemble=127.0.0.1:57051 2024-11-09T18:52:01,444 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:52:01,445 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:52:01,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350610x0, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-09T18:52:01,459 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:350610x0, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:52:01,459 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35061-0x10120f0d0b40003 connected 2024-11-09T18:52:01,459 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-09T18:52:01,460 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-09T18:52:01,461 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-09T18:52:01,462 DEBUG [Time-limited 
test {}] zookeeper.ZKUtil(113): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-09T18:52:01,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35061 2024-11-09T18:52:01,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35061 2024-11-09T18:52:01,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35061 2024-11-09T18:52:01,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35061 2024-11-09T18:52:01,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35061 2024-11-09T18:52:01,475 DEBUG [M:0;fb97eb0edbe8:45825 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;fb97eb0edbe8:45825 2024-11-09T18:52:01,476 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/fb97eb0edbe8,45825,1731178321182 2024-11-09T18:52:01,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:52:01,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:52:01,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:52:01,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:52:01,485 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/fb97eb0edbe8,45825,1731178321182 2024-11-09T18:52:01,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T18:52:01,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T18:52:01,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-09T18:52:01,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, 
quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,495 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,496 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-09T18:52:01,496 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/fb97eb0edbe8,45825,1731178321182 from backup master directory 2024-11-09T18:52:01,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:52:01,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/fb97eb0edbe8,45825,1731178321182 2024-11-09T18:52:01,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:52:01,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:52:01,506 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
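The ZKWatcher DEBUG traffic above and below is the new master registering under /hbase/backup-masters, claiming /hbase/master, and every server reacting to the resulting NodeCreated/NodeDeleted/NodeChildrenChanged events on the watches it keeps. The same watch pattern, reduced to the plain ZooKeeper client API (the ensemble address is taken from this log; everything else is illustrative, not HBase's ZKWatcher implementation):

import org.apache.zookeeper.ZooKeeper;

public class MasterZNodeWatchSketch {
  public static void main(String[] args) throws Exception {
    ZooKeeper zk = new ZooKeeper("127.0.0.1:57051", 30_000, event -> {
      // The entries above show exactly these event types arriving on each watcher.
      System.out.println("type=" + event.getType() + ", path=" + event.getPath());
    });
    // exists() registers a watch even when the znode is not there yet
    // ("Set watcher on znode that does not yet exist" in the entries above).
    zk.exists("/hbase/master", true);
    if (zk.exists("/hbase/backup-masters", true) != null) {
      zk.getChildren("/hbase/backup-masters", true); // source of NodeChildrenChanged events
    }
    Thread.sleep(5_000); // keep the session alive long enough to observe events
    zk.close();
  }
}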
2024-11-09T18:52:01,506 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-09T18:52:01,506 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=fb97eb0edbe8,45825,1731178321182 2024-11-09T18:52:01,513 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/hbase.id] with ID: e012dde2-f5c9-48be-9624-1eea107e69b7 2024-11-09T18:52:01,513 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/.tmp/hbase.id 2024-11-09T18:52:01,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741826_1002 (size=42) 2024-11-09T18:52:01,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741826_1002 (size=42) 2024-11-09T18:52:01,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741826_1002 (size=42) 2024-11-09T18:52:01,526 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/.tmp/hbase.id]:[hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/hbase.id] 2024-11-09T18:52:01,547 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-09T18:52:01,547 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-09T18:52:01,550 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 3ms. 
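The two FSUtils entries just above describe the usual create-under-.tmp-then-rename sequence for hbase.id, so readers never observe a partially written cluster ID file. Sketched with the stock Hadoop FileSystem API (the paths and ID value are copied from the log; the real file holds a serialized ClusterId rather than raw text, so this only illustrates the rename step):

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34747"), new Configuration());
    Path tmp = new Path("/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/.tmp/hbase.id");
    Path dst = new Path("/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/hbase.id");
    // Write the ID to the temporary location first ...
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("e012dde2-f5c9-48be-9624-1eea107e69b7");
    }
    // ... then rename it into its final place, as the FSUtils entries above log.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}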
2024-11-09T18:52:01,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,564 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741827_1003 (size=196) 2024-11-09T18:52:01,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741827_1003 (size=196) 2024-11-09T18:52:01,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741827_1003 (size=196) 2024-11-09T18:52:01,580 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T18:52:01,581 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-09T18:52:01,581 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T18:52:01,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is 
added to blk_1073741828_1004 (size=1189) 2024-11-09T18:52:01,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741828_1004 (size=1189) 2024-11-09T18:52:01,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741828_1004 (size=1189) 2024-11-09T18:52:01,598 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store 2024-11-09T18:52:01,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741829_1005 (size=34) 2024-11-09T18:52:01,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741829_1005 (size=34) 2024-11-09T18:52:01,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741829_1005 (size=34) 2024-11-09T18:52:01,611 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:52:01,612 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T18:52:01,612 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:52:01,612 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T18:52:01,612 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T18:52:01,612 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:52:01,612 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:52:01,612 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731178321612Disabling compacts and flushes for region at 1731178321612Disabling writes for close at 1731178321612Writing region close event to WAL at 1731178321612Closed at 1731178321612 2024-11-09T18:52:01,614 WARN [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/.initializing 2024-11-09T18:52:01,614 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/WALs/fb97eb0edbe8,45825,1731178321182 2024-11-09T18:52:01,619 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fb97eb0edbe8%2C45825%2C1731178321182, suffix=, logDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/WALs/fb97eb0edbe8,45825,1731178321182, archiveDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/oldWALs, maxLogs=10 2024-11-09T18:52:01,620 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor fb97eb0edbe8%2C45825%2C1731178321182.1731178321619 2024-11-09T18:52:01,632 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/WALs/fb97eb0edbe8,45825,1731178321182/fb97eb0edbe8%2C45825%2C1731178321182.1731178321619 2024-11-09T18:52:01,642 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40147:40147),(127.0.0.1/127.0.0.1:36383:36383),(127.0.0.1/127.0.0.1:33711:33711)] 2024-11-09T18:52:01,644 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-09T18:52:01,644 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:52:01,645 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,645 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,647 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-09T18:52:01,650 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:01,650 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:52:01,651 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,653 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-09T18:52:01,653 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:01,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T18:52:01,654 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,657 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-09T18:52:01,657 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:01,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T18:52:01,658 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,661 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-09T18:52:01,661 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:01,662 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T18:52:01,662 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,663 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,664 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,665 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,666 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,666 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T18:52:01,668 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-09T18:52:01,670 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T18:52:01,671 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64049246, jitterRate=-0.04559186100959778}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T18:52:01,672 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731178321645Initializing all the Stores at 1731178321646 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178321646Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178321647 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178321647Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178321647Cleaning up temporary data from old regions at 1731178321666 (+19 ms)Region opened successfully at 1731178321672 (+6 ms) 2024-11-09T18:52:01,672 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-09T18:52:01,677 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5366993c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fb97eb0edbe8/172.17.0.3:0 2024-11-09T18:52:01,678 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-09T18:52:01,678 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-09T18:52:01,678 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-09T18:52:01,678 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-09T18:52:01,679 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-09T18:52:01,679 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-09T18:52:01,680 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-09T18:52:01,682 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-09T18:52:01,684 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-09T18:52:01,695 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-09T18:52:01,696 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-09T18:52:01,697 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-09T18:52:01,705 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-09T18:52:01,706 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-09T18:52:01,707 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-09T18:52:01,716 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-09T18:52:01,717 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-09T18:52:01,726 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-09T18:52:01,729 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-09T18:52:01,737 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-09T18:52:01,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T18:52:01,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T18:52:01,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-09T18:52:01,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-11-09T18:52:01,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,748 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,748 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=fb97eb0edbe8,45825,1731178321182, sessionid=0x10120f0d0b40000, setting cluster-up flag (Was=false) 2024-11-09T18:52:01,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,769 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,800 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-09T18:52:01,802 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fb97eb0edbe8,45825,1731178321182 2024-11-09T18:52:01,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,821 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:01,853 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-09T18:52:01,854 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=fb97eb0edbe8,45825,1731178321182 2024-11-09T18:52:01,856 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-09T18:52:01,859 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-09T18:52:01,859 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-09T18:52:01,859 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-09T18:52:01,859 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: fb97eb0edbe8,45825,1731178321182 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-09T18:52:01,861 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/fb97eb0edbe8:0, corePoolSize=5, maxPoolSize=5 2024-11-09T18:52:01,861 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/fb97eb0edbe8:0, corePoolSize=5, maxPoolSize=5 2024-11-09T18:52:01,861 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/fb97eb0edbe8:0, corePoolSize=5, maxPoolSize=5 2024-11-09T18:52:01,861 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/fb97eb0edbe8:0, corePoolSize=5, maxPoolSize=5 2024-11-09T18:52:01,861 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/fb97eb0edbe8:0, corePoolSize=10, maxPoolSize=10 2024-11-09T18:52:01,861 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:01,861 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/fb97eb0edbe8:0, corePoolSize=2, maxPoolSize=2 2024-11-09T18:52:01,861 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:01,864 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T18:52:01,864 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-09T18:52:01,865 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:01,865 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-09T18:52:01,874 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer(746): ClusterId : e012dde2-f5c9-48be-9624-1eea107e69b7 2024-11-09T18:52:01,874 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T18:52:01,875 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer(746): ClusterId : e012dde2-f5c9-48be-9624-1eea107e69b7 2024-11-09T18:52:01,875 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T18:52:01,875 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(746): ClusterId : e012dde2-f5c9-48be-9624-1eea107e69b7 2024-11-09T18:52:01,876 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-09T18:52:01,876 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731178351876 2024-11-09T18:52:01,876 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-09T18:52:01,876 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-09T18:52:01,876 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-09T18:52:01,876 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-09T18:52:01,876 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-09T18:52:01,876 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-09T18:52:01,878 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-11-09T18:52:01,878 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-09T18:52:01,879 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-09T18:52:01,879 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-09T18:52:01,880 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-09T18:52:01,880 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-09T18:52:01,881 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.large.0-1731178321880,5,FailOnTimeoutGroup] 2024-11-09T18:52:01,884 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.small.0-1731178321881,5,FailOnTimeoutGroup] 2024-11-09T18:52:01,884 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:01,885 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-09T18:52:01,885 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:01,885 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-09T18:52:01,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741831_1007 (size=1321) 2024-11-09T18:52:01,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741831_1007 (size=1321) 2024-11-09T18:52:01,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741831_1007 (size=1321) 2024-11-09T18:52:01,891 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-09T18:52:01,892 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23 2024-11-09T18:52:01,896 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T18:52:01,896 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T18:52:01,896 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T18:52:01,896 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T18:52:01,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741832_1008 (size=32) 2024-11-09T18:52:01,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741832_1008 (size=32) 2024-11-09T18:52:01,905 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741832_1008 (size=32) 2024-11-09T18:52:01,906 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:52:01,908 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T18:52:01,910 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T18:52:01,911 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:01,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:52:01,911 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T18:52:01,913 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T18:52:01,914 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:01,914 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:52:01,915 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T18:52:01,916 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-09T18:52:01,916 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-09T18:52:01,917 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T18:52:01,917 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T18:52:01,917 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:01,917 DEBUG [RS:1;fb97eb0edbe8:35949 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@637b9306, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fb97eb0edbe8/172.17.0.3:0 2024-11-09T18:52:01,918 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:52:01,918 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T18:52:01,920 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T18:52:01,920 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:01,921 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:52:01,922 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T18:52:01,923 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740 2024-11-09T18:52:01,923 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740 2024-11-09T18:52:01,925 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T18:52:01,925 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T18:52:01,926 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T18:52:01,926 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T18:52:01,927 DEBUG [RS:2;fb97eb0edbe8:35061 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fbf1e16, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fb97eb0edbe8/172.17.0.3:0 2024-11-09T18:52:01,928 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-09T18:52:01,928 DEBUG [RS:0;fb97eb0edbe8:33623 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70287e38, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=fb97eb0edbe8/172.17.0.3:0 2024-11-09T18:52:01,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T18:52:01,932 DEBUG [RS:1;fb97eb0edbe8:35949 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;fb97eb0edbe8:35949 2024-11-09T18:52:01,933 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T18:52:01,933 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T18:52:01,933 DEBUG [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-09T18:52:01,940 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer(2659): reportForDuty to master=fb97eb0edbe8,45825,1731178321182 with port=35949, startcode=1731178321404 2024-11-09T18:52:01,941 DEBUG [RS:1;fb97eb0edbe8:35949 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T18:52:01,941 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T18:52:01,942 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64136787, jitterRate=-0.04428739845752716}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T18:52:01,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731178321906Initializing all the Stores at 1731178321907 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178321907Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178321908 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178321908Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178321908Cleaning up temporary data from old regions at 1731178321925 (+17 ms)Region opened successfully at 1731178321943 (+18 ms) 2024-11-09T18:52:01,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T18:52:01,943 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T18:52:01,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T18:52:01,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T18:52:01,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T18:52:01,946 DEBUG [RS:0;fb97eb0edbe8:33623 {}] regionserver.ShutdownHook(81): Installed shutdown hook 
thread: Shutdownhook:RS:0;fb97eb0edbe8:33623 2024-11-09T18:52:01,946 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T18:52:01,946 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T18:52:01,946 DEBUG [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T18:52:01,947 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(2659): reportForDuty to master=fb97eb0edbe8,45825,1731178321182 with port=33623, startcode=1731178321351 2024-11-09T18:52:01,948 DEBUG [RS:0;fb97eb0edbe8:33623 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T18:52:01,948 DEBUG [RS:2;fb97eb0edbe8:35061 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;fb97eb0edbe8:35061 2024-11-09T18:52:01,948 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-09T18:52:01,948 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-09T18:52:01,948 DEBUG [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-09T18:52:01,948 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T18:52:01,949 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731178321943Disabling compacts and flushes for region at 1731178321943Disabling writes for close at 1731178321943Writing region close event to WAL at 1731178321948 (+5 ms)Closed at 1731178321948 2024-11-09T18:52:01,949 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer(2659): reportForDuty to master=fb97eb0edbe8,45825,1731178321182 with port=35061, startcode=1731178321440 2024-11-09T18:52:01,949 DEBUG [RS:2;fb97eb0edbe8:35061 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-09T18:52:01,949 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57441, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T18:52:01,950 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37467, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T18:52:01,951 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T18:52:01,951 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-09T18:52:01,951 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-09T18:52:01,951 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45825 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fb97eb0edbe8,35949,1731178321404 2024-11-09T18:52:01,951 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45825 {}] master.ServerManager(517): Registering 
regionserver=fb97eb0edbe8,35949,1731178321404 2024-11-09T18:52:01,953 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-09T18:52:01,953 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T18:52:01,954 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45825 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:01,954 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45825 {}] master.ServerManager(517): Registering regionserver=fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:01,955 DEBUG [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23 2024-11-09T18:52:01,955 DEBUG [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34747 2024-11-09T18:52:01,955 DEBUG [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T18:52:01,956 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-09T18:52:01,956 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45825 {}] master.ServerManager(363): Checking decommissioned status of RegionServer fb97eb0edbe8,35061,1731178321440 2024-11-09T18:52:01,956 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45825 {}] master.ServerManager(517): Registering regionserver=fb97eb0edbe8,35061,1731178321440 2024-11-09T18:52:01,957 DEBUG [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23 2024-11-09T18:52:01,957 DEBUG [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34747 2024-11-09T18:52:01,957 DEBUG [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T18:52:01,958 DEBUG [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23 2024-11-09T18:52:01,958 DEBUG [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34747 2024-11-09T18:52:01,958 DEBUG [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-09T18:52:01,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T18:52:02,032 DEBUG [RS:1;fb97eb0edbe8:35949 {}] zookeeper.ZKUtil(111): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Set 
watcher on existing znode=/hbase/rs/fb97eb0edbe8,35949,1731178321404 2024-11-09T18:52:02,032 WARN [RS:1;fb97eb0edbe8:35949 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T18:52:02,032 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fb97eb0edbe8,35949,1731178321404] 2024-11-09T18:52:02,032 INFO [RS:1;fb97eb0edbe8:35949 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T18:52:02,032 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fb97eb0edbe8,33623,1731178321351] 2024-11-09T18:52:02,032 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [fb97eb0edbe8,35061,1731178321440] 2024-11-09T18:52:02,032 DEBUG [RS:2;fb97eb0edbe8:35061 {}] zookeeper.ZKUtil(111): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fb97eb0edbe8,35061,1731178321440 2024-11-09T18:52:02,032 DEBUG [RS:0;fb97eb0edbe8:33623 {}] zookeeper.ZKUtil(111): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:02,032 DEBUG [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/WALs/fb97eb0edbe8,35949,1731178321404 2024-11-09T18:52:02,032 WARN [RS:2;fb97eb0edbe8:35061 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-09T18:52:02,032 WARN [RS:0;fb97eb0edbe8:33623 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
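[editor's note] The entries above show each region server creating an ephemeral znode under /hbase/rs on the quorum 127.0.0.1:57051, and the master's RegionServerTracker picking those nodes up. Below is a minimal sketch, not taken from the test, of how those registrations could be listed with the plain ZooKeeper client; the quorum string and base znode are copied from the log, the no-op watcher and class name are my own assumptions.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServerZNodes {
        public static void main(String[] args) throws Exception {
            // Quorum and base znode as reported in the log above (assumed reachable).
            ZooKeeper zk = new ZooKeeper("127.0.0.1:57051", 30000, event -> { });
            try {
                // Each live region server registers an ephemeral child here,
                // e.g. fb97eb0edbe8,35949,1731178321404
                List<String> servers = zk.getChildren("/hbase/rs", false);
                servers.forEach(System.out::println);
            } finally {
                zk.close();
            }
        }
    }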
2024-11-09T18:52:02,032 INFO [RS:2;fb97eb0edbe8:35061 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T18:52:02,033 INFO [RS:0;fb97eb0edbe8:33623 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T18:52:02,033 DEBUG [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/WALs/fb97eb0edbe8,35061,1731178321440 2024-11-09T18:52:02,033 DEBUG [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/WALs/fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:02,037 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T18:52:02,037 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T18:52:02,039 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T18:52:02,039 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-09T18:52:02,040 INFO [RS:2;fb97eb0edbe8:35061 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T18:52:02,040 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,040 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T18:52:02,041 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T18:52:02,041 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-09T18:52:02,041 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=2, maxPoolSize=2 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:52:02,042 DEBUG [RS:2;fb97eb0edbe8:35061 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:52:02,044 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T18:52:02,049 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-09T18:52:02,049 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-09T18:52:02,049 INFO [RS:1;fb97eb0edbe8:35949 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T18:52:02,049 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,049 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,049 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,049 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,049 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,049 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,35061,1731178321440-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T18:52:02,050 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T18:52:02,050 INFO [RS:0;fb97eb0edbe8:33623 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-09T18:52:02,050 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,051 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T18:52:02,051 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
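[editor's note] The MemStoreFlusher lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. roughly 95% of the limit. Assuming the standard global-memstore sizing properties are what produce these numbers, the sketch below shows how they would be set on a Configuration; the property names are my assumption and are not printed anywhere in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreSizingSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Fraction of the region server heap usable by all memstores (assumed property name).
            conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
            // Low-water mark as a fraction of that limit; 0.95 * 880 MB is ~836 MB as logged.
            conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
            System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
        }
    }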
2024-11-09T18:52:02,051 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,051 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,052 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,052 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,052 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,052 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=2, maxPoolSize=2 2024-11-09T18:52:02,052 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,052 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,052 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,052 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,052 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,052 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,052 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:52:02,053 DEBUG [RS:1;fb97eb0edbe8:35949 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:52:02,054 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-09T18:52:02,056 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-09T18:52:02,056 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-09T18:52:02,056 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,056 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,056 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,056 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,056 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,057 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=2, maxPoolSize=2 2024-11-09T18:52:02,057 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,057 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,057 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,057 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,057 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,057 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,057 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,057 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,057 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,057 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,057 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,35949,1731178321404-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-09T18:52:02,057 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/fb97eb0edbe8:0, corePoolSize=1, maxPoolSize=1 2024-11-09T18:52:02,057 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:52:02,058 DEBUG [RS:0;fb97eb0edbe8:33623 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0, corePoolSize=3, maxPoolSize=3 2024-11-09T18:52:02,064 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,064 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,064 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,064 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,065 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,065 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,33623,1731178321351-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T18:52:02,072 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T18:52:02,073 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,35061,1731178321440-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,073 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,073 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.Replication(171): fb97eb0edbe8,35061,1731178321440 started 2024-11-09T18:52:02,075 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T18:52:02,075 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,35949,1731178321404-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,075 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,075 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.Replication(171): fb97eb0edbe8,35949,1731178321404 started 2024-11-09T18:52:02,079 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-09T18:52:02,079 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,33623,1731178321351-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,080 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
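[editor's note] The ChoreService entries above register periodic chores (CompactionChecker, MemstoreFlusherChore, nonceCleaner, BrokenStoreFileCleaner, MobFileCleanerChore, ...) for each region server. The sketch below illustrates that ScheduledChore pattern with a made-up chore name and period; ChoreService and ScheduledChore are internal HBase classes, so exact constructor signatures may differ between versions and this is illustrative only.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
        public static void main(String[] args) {
            // Minimal Stoppable so the chore can be cancelled, mirroring the RS lifecycle.
            Stoppable stopper = new Stoppable() {
                private volatile boolean stopped;
                @Override public void stop(String why) { stopped = true; }
                @Override public boolean isStopped() { return stopped; }
            };
            // Thread-name prefix, analogous to the per-server prefixes in the log.
            ChoreService choreService = new ChoreService("demo");
            // Hypothetical chore running every 1000 ms, like CompactionChecker above.
            ScheduledChore chore = new ScheduledChore("DemoChore", stopper, 1000) {
                @Override protected void chore() {
                    System.out.println("periodic work");
                }
            };
            choreService.scheduleChore(chore);
            // ... later: choreService.shutdown();
        }
    }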
2024-11-09T18:52:02,080 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.Replication(171): fb97eb0edbe8,33623,1731178321351 started 2024-11-09T18:52:02,088 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,088 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer(1482): Serving as fb97eb0edbe8,35061,1731178321440, RpcServer on fb97eb0edbe8/172.17.0.3:35061, sessionid=0x10120f0d0b40003 2024-11-09T18:52:02,088 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T18:52:02,088 DEBUG [RS:2;fb97eb0edbe8:35061 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fb97eb0edbe8,35061,1731178321440 2024-11-09T18:52:02,088 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,35061,1731178321440' 2024-11-09T18:52:02,088 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T18:52:02,089 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T18:52:02,090 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T18:52:02,090 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T18:52:02,090 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-09T18:52:02,090 DEBUG [RS:2;fb97eb0edbe8:35061 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fb97eb0edbe8,35061,1731178321440 2024-11-09T18:52:02,090 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,35061,1731178321440' 2024-11-09T18:52:02,090 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T18:52:02,090 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer(1482): Serving as fb97eb0edbe8,35949,1731178321404, RpcServer on fb97eb0edbe8/172.17.0.3:35949, sessionid=0x10120f0d0b40002 2024-11-09T18:52:02,090 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T18:52:02,090 DEBUG [RS:1;fb97eb0edbe8:35949 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fb97eb0edbe8,35949,1731178321404 2024-11-09T18:52:02,090 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,35949,1731178321404' 2024-11-09T18:52:02,090 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T18:52:02,090 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T18:52:02,091 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T18:52:02,091 DEBUG [RS:2;fb97eb0edbe8:35061 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T18:52:02,091 INFO [RS:2;fb97eb0edbe8:35061 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T18:52:02,091 INFO [RS:2;fb97eb0edbe8:35061 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T18:52:02,091 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T18:52:02,091 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T18:52:02,091 DEBUG [RS:1;fb97eb0edbe8:35949 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fb97eb0edbe8,35949,1731178321404 2024-11-09T18:52:02,091 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,35949,1731178321404' 2024-11-09T18:52:02,091 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T18:52:02,092 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T18:52:02,092 DEBUG [RS:1;fb97eb0edbe8:35949 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T18:52:02,092 INFO [RS:1;fb97eb0edbe8:35949 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T18:52:02,092 INFO [RS:1;fb97eb0edbe8:35949 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-09T18:52:02,094 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,094 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(1482): Serving as fb97eb0edbe8,33623,1731178321351, RpcServer on fb97eb0edbe8/172.17.0.3:33623, sessionid=0x10120f0d0b40001 2024-11-09T18:52:02,094 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-09T18:52:02,094 DEBUG [RS:0;fb97eb0edbe8:33623 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:02,094 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,33623,1731178321351' 2024-11-09T18:52:02,094 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-09T18:52:02,095 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-09T18:52:02,096 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-09T18:52:02,096 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-09T18:52:02,096 DEBUG [RS:0;fb97eb0edbe8:33623 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:02,096 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'fb97eb0edbe8,33623,1731178321351' 2024-11-09T18:52:02,096 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-09T18:52:02,096 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-09T18:52:02,097 DEBUG [RS:0;fb97eb0edbe8:33623 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-09T18:52:02,097 INFO [RS:0;fb97eb0edbe8:33623 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-09T18:52:02,097 INFO [RS:0;fb97eb0edbe8:33623 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-09T18:52:02,106 WARN [fb97eb0edbe8:45825 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-09T18:52:02,194 INFO [RS:2;fb97eb0edbe8:35061 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fb97eb0edbe8%2C35061%2C1731178321440, suffix=, logDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/WALs/fb97eb0edbe8,35061,1731178321440, archiveDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/oldWALs, maxLogs=32 2024-11-09T18:52:02,194 INFO [RS:1;fb97eb0edbe8:35949 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fb97eb0edbe8%2C35949%2C1731178321404, suffix=, logDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/WALs/fb97eb0edbe8,35949,1731178321404, archiveDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/oldWALs, maxLogs=32 2024-11-09T18:52:02,197 INFO [RS:1;fb97eb0edbe8:35949 {}] monitor.StreamSlowMonitor(122): New stream slow monitor fb97eb0edbe8%2C35949%2C1731178321404.1731178322196 2024-11-09T18:52:02,197 INFO [RS:2;fb97eb0edbe8:35061 {}] monitor.StreamSlowMonitor(122): New stream slow monitor fb97eb0edbe8%2C35061%2C1731178321440.1731178322196 2024-11-09T18:52:02,199 INFO [RS:0;fb97eb0edbe8:33623 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fb97eb0edbe8%2C33623%2C1731178321351, suffix=, logDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/WALs/fb97eb0edbe8,33623,1731178321351, archiveDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/oldWALs, maxLogs=32 2024-11-09T18:52:02,200 INFO [RS:0;fb97eb0edbe8:33623 {}] monitor.StreamSlowMonitor(122): New stream slow monitor fb97eb0edbe8%2C33623%2C1731178321351.1731178322200 2024-11-09T18:52:02,205 INFO [RS:1;fb97eb0edbe8:35949 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/WALs/fb97eb0edbe8,35949,1731178321404/fb97eb0edbe8%2C35949%2C1731178321404.1731178322196 2024-11-09T18:52:02,206 INFO [RS:2;fb97eb0edbe8:35061 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/WALs/fb97eb0edbe8,35061,1731178321440/fb97eb0edbe8%2C35061%2C1731178321440.1731178322196 2024-11-09T18:52:02,207 DEBUG [RS:1;fb97eb0edbe8:35949 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40147:40147),(127.0.0.1/127.0.0.1:33711:33711),(127.0.0.1/127.0.0.1:36383:36383)] 2024-11-09T18:52:02,207 DEBUG [RS:2;fb97eb0edbe8:35061 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40147:40147),(127.0.0.1/127.0.0.1:33711:33711),(127.0.0.1/127.0.0.1:36383:36383)] 2024-11-09T18:52:02,209 INFO [RS:0;fb97eb0edbe8:33623 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/WALs/fb97eb0edbe8,33623,1731178321351/fb97eb0edbe8%2C33623%2C1731178321351.1731178322200 2024-11-09T18:52:02,210 DEBUG [RS:0;fb97eb0edbe8:33623 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36383:36383),(127.0.0.1/127.0.0.1:33711:33711),(127.0.0.1/127.0.0.1:40147:40147)] 2024-11-09T18:52:02,356 DEBUG [fb97eb0edbe8:45825 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-09T18:52:02,357 DEBUG [fb97eb0edbe8:45825 {}] balancer.BalancerClusterState(204): Hosts are {fb97eb0edbe8=0} racks are {/default-rack=0} 2024-11-09T18:52:02,359 DEBUG [fb97eb0edbe8:45825 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T18:52:02,359 DEBUG [fb97eb0edbe8:45825 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T18:52:02,359 DEBUG [fb97eb0edbe8:45825 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T18:52:02,360 DEBUG [fb97eb0edbe8:45825 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T18:52:02,360 DEBUG [fb97eb0edbe8:45825 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T18:52:02,360 DEBUG [fb97eb0edbe8:45825 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T18:52:02,360 INFO [fb97eb0edbe8:45825 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T18:52:02,360 INFO [fb97eb0edbe8:45825 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T18:52:02,360 INFO [fb97eb0edbe8:45825 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T18:52:02,360 DEBUG [fb97eb0edbe8:45825 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T18:52:02,360 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:02,362 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fb97eb0edbe8,33623,1731178321351, state=OPENING 2024-11-09T18:52:02,379 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-09T18:52:02,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:02,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:02,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:02,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:02,390 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:52:02,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:52:02,391 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:52:02,391 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-09T18:52:02,391 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure 1588230740, server=fb97eb0edbe8,33623,1731178321351}] 2024-11-09T18:52:02,392 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:52:02,545 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-09T18:52:02,547 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58997, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-09T18:52:02,552 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-09T18:52:02,552 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-09T18:52:02,555 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=fb97eb0edbe8%2C33623%2C1731178321351.meta, suffix=.meta, logDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/WALs/fb97eb0edbe8,33623,1731178321351, archiveDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/oldWALs, maxLogs=32 2024-11-09T18:52:02,556 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor fb97eb0edbe8%2C33623%2C1731178321351.meta.1731178322555.meta 2024-11-09T18:52:02,564 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/WALs/fb97eb0edbe8,33623,1731178321351/fb97eb0edbe8%2C33623%2C1731178321351.meta.1731178322555.meta 2024-11-09T18:52:02,565 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36383:36383),(127.0.0.1/127.0.0.1:33711:33711),(127.0.0.1/127.0.0.1:40147:40147)] 2024-11-09T18:52:02,566 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-09T18:52:02,566 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-09T18:52:02,566 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-09T18:52:02,566 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
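[editor's note] The WAL configuration entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32, for both the per-server WALs and the .meta WAL) come from the region server configuration. The sketch below shows the settings I believe map to those numbers; the property names are assumptions, not quoted from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalSizingSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // WAL block size; the roll size is typically blocksize * multiplier
            // (256 MB * 0.5 = 128 MB, matching the log above).
            conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
            conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
            // Upper bound on the number of WAL files before flushes are forced (maxLogs=32 above).
            conf.setInt("hbase.regionserver.maxlogs", 32);
            System.out.println(conf.get("hbase.regionserver.maxlogs"));
        }
    }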
2024-11-09T18:52:02,566 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-09T18:52:02,566 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:52:02,566 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-09T18:52:02,567 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-09T18:52:02,568 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-09T18:52:02,570 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-09T18:52:02,570 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:02,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:52:02,570 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-09T18:52:02,571 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-09T18:52:02,571 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:02,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:52:02,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-09T18:52:02,573 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-09T18:52:02,573 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:02,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-09T18:52:02,574 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-09T18:52:02,575 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-09T18:52:02,575 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:02,575 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
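[editor's note] The StoreOpener entries above instantiate the info, ns, rep_barrier and table families of hbase:meta with the block sizes, ROW_INDEX_V1 encoding and ROWCOL bloom filters listed in the open journal. A small client-side sketch, assuming a reachable cluster on the quorum from this log, that prints the same column-family settings from the meta table descriptor:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public class DescribeMeta {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // ZooKeeper endpoint taken from the log; adjust for a real deployment.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.setInt("hbase.zookeeper.property.clientPort", 57051);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
                for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
                    System.out.println(cf.getNameAsString()
                        + " blocksize=" + cf.getBlocksize()
                        + " encoding=" + cf.getDataBlockEncoding()
                        + " bloom=" + cf.getBloomFilterType());
                }
            }
        }
    }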
2024-11-09T18:52:02,576 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-09T18:52:02,576 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740 2024-11-09T18:52:02,578 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740 2024-11-09T18:52:02,579 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-09T18:52:02,580 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-09T18:52:02,580 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-09T18:52:02,582 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-09T18:52:02,583 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61551231, jitterRate=-0.08281518518924713}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-09T18:52:02,583 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-09T18:52:02,584 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731178322567Writing region info on filesystem at 1731178322567Initializing all the Stores at 1731178322568 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178322568Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178322568Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178322568Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731178322568Cleaning up temporary data from old regions at 1731178322580 (+12 ms)Running coprocessor post-open hooks at 1731178322583 (+3 ms)Region opened successfully at 1731178322583 2024-11-09T18:52:02,585 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731178322545 2024-11-09T18:52:02,588 DEBUG [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-09T18:52:02,588 INFO [RS_OPEN_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-09T18:52:02,589 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:02,591 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as fb97eb0edbe8,33623,1731178321351, state=OPEN 2024-11-09T18:52:02,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T18:52:02,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T18:52:02,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T18:52:02,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-09T18:52:02,600 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:02,600 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:52:02,600 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:52:02,600 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:52:02,600 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-09T18:52:02,605 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-09T18:52:02,605 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=fb97eb0edbe8,33623,1731178321351 in 209 msec 2024-11-09T18:52:02,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-09T18:52:02,610 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 654 msec 2024-11-09T18:52:02,611 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-09T18:52:02,611 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-09T18:52:02,613 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T18:52:02,613 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fb97eb0edbe8,33623,1731178321351, seqNum=-1] 2024-11-09T18:52:02,613 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T18:52:02,615 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:37967, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T18:52:02,626 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 765 msec 2024-11-09T18:52:02,626 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731178322626, completionTime=-1 2024-11-09T18:52:02,626 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-09T18:52:02,626 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
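
The registry lookups above ("Start fetching meta region location from registry" followed by the resolved hbase:meta,,1.1588230740 location) are the same steps any client performs before its first data operation. A minimal client-side sketch of that lookup, assuming a reachable cluster configuration (the quorum setting is illustrative; this test uses an ephemeral ZooKeeper port):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class MetaLocationLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // illustrative; not the test's ephemeral port
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // Resolves hbase:meta,,1.1588230740 to its hosting region server,
          // mirroring "The fetched meta region location is [...]" in the log.
          HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
          System.out.println("hbase:meta is on " + loc.getServerName());
        }
      }
    }
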
2024-11-09T18:52:02,628 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-09T18:52:02,628 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731178382628 2024-11-09T18:52:02,628 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731178442628 2024-11-09T18:52:02,628 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-09T18:52:02,629 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-09T18:52:02,629 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45825,1731178321182-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,629 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45825,1731178321182-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,629 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45825,1731178321182-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,629 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-fb97eb0edbe8:45825, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,629 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,630 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,632 DEBUG [master/fb97eb0edbe8:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-09T18:52:02,635 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.129sec 2024-11-09T18:52:02,635 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-09T18:52:02,635 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-09T18:52:02,635 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-09T18:52:02,636 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
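
The master-initialization and chore lines above come from the single-process mini cluster this test runs (master, three region servers, mini HDFS and ZooKeeper in one JVM). A sketch of how such a cluster is typically started and torn down; the method names follow the HBaseTestingUtility API and are assumed to carry over to the HBaseTestingUtil class named in this log, and the erasure-coding lines are an assumption about what a WAL-on-EC test would add, not something visible in this excerpt:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // In-process ZooKeeper + HDFS + master + 3 region servers, matching
        // "expected min=3 server(s)" and the three datanode ports in the block reports below.
        util.startMiniCluster(3);

        // Assumption: a WAL-on-erasure-coding test applies an EC policy to the HDFS
        // directories it exercises. XOR-2-1-1024k is a built-in policy that fits a
        // three-datanode mini DFS; the real test's policy and ordering may differ.
        DistributedFileSystem dfs = util.getDFSCluster().getFileSystem();
        dfs.enableErasureCodingPolicy("XOR-2-1-1024k");
        dfs.setErasureCodingPolicy(new Path("/"), "XOR-2-1-1024k");

        // ... test body ...
        util.shutdownMiniCluster();
      }
    }
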
2024-11-09T18:52:02,636 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-09T18:52:02,636 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45825,1731178321182-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-09T18:52:02,636 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45825,1731178321182-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-09T18:52:02,638 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-09T18:52:02,639 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-09T18:52:02,639 INFO [master/fb97eb0edbe8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=fb97eb0edbe8,45825,1731178321182-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-09T18:52:02,675 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1438846d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T18:52:02,675 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request fb97eb0edbe8,45825,-1 for getting cluster id 2024-11-09T18:52:02,675 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-09T18:52:02,676 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e012dde2-f5c9-48be-9624-1eea107e69b7' 2024-11-09T18:52:02,677 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-09T18:52:02,677 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e012dde2-f5c9-48be-9624-1eea107e69b7" 2024-11-09T18:52:02,677 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@754f38b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T18:52:02,677 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [fb97eb0edbe8,45825,-1] 2024-11-09T18:52:02,677 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-09T18:52:02,678 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:52:02,679 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51800, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-09T18:52:02,680 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43da8804, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-09T18:52:02,681 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-09T18:52:02,682 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=fb97eb0edbe8,33623,1731178321351, seqNum=-1] 2024-11-09T18:52:02,682 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-09T18:52:02,684 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39674, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-09T18:52:02,686 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=fb97eb0edbe8,45825,1731178321182 2024-11-09T18:52:02,687 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-09T18:52:02,688 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is fb97eb0edbe8,45825,1731178321182 2024-11-09T18:52:02,688 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@1f2e9fa0 2024-11-09T18:52:02,688 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-09T18:52:02,690 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51810, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-09T18:52:02,691 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-09T18:52:02,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-09T18:52:02,695 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-09T18:52:02,695 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:02,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-09T18:52:02,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T18:52:02,697 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-09T18:52:02,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741837_1013 (size=392) 2024-11-09T18:52:02,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741837_1013 (size=392) 2024-11-09T18:52:02,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741837_1013 (size=392) 2024-11-09T18:52:02,709 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b57a9c451f0886339e4ff0a85e749d9d, NAME => 'TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23 2024-11-09T18:52:02,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741838_1014 (size=51) 2024-11-09T18:52:02,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741838_1014 (size=51) 2024-11-09T18:52:02,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741838_1014 (size=51) 2024-11-09T18:52:02,719 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:52:02,720 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing b57a9c451f0886339e4ff0a85e749d9d, disabling compactions & flushes 2024-11-09T18:52:02,720 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 2024-11-09T18:52:02,720 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 2024-11-09T18:52:02,720 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. after waiting 0 ms 2024-11-09T18:52:02,720 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 2024-11-09T18:52:02,720 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 
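
The create request above ('TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single 'cf' family with VERSIONS => '1') maps onto a client call along these lines; a minimal sketch assuming an already-open Admin (names other than the table and family are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      static void create(Admin admin) throws IOException {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        TableDescriptor desc = TableDescriptorBuilder.newBuilder(tn)
            .setRegionReplication(1)                           // REGION_REPLICATION => '1'
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                .setMaxVersions(1)                             // VERSIONS => '1'
                .build())
            .build();
        // Triggers the CreateTableProcedure (pid=4) and the region assignment traced above and below.
        admin.createTable(desc);
      }
    }
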
2024-11-09T18:52:02,720 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for b57a9c451f0886339e4ff0a85e749d9d: Waiting for close lock at 1731178322720Disabling compacts and flushes for region at 1731178322720Disabling writes for close at 1731178322720Writing region close event to WAL at 1731178322720Closed at 1731178322720 2024-11-09T18:52:02,722 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-09T18:52:02,723 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731178322722"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731178322722"}]},"ts":"1731178322722"} 2024-11-09T18:52:02,726 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-09T18:52:02,729 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-09T18:52:02,729 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731178322729"}]},"ts":"1731178322729"} 2024-11-09T18:52:02,733 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-09T18:52:02,733 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {fb97eb0edbe8=0} racks are {/default-rack=0} 2024-11-09T18:52:02,734 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-09T18:52:02,734 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-09T18:52:02,734 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-09T18:52:02,734 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-09T18:52:02,735 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-09T18:52:02,735 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-09T18:52:02,735 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-09T18:52:02,735 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-09T18:52:02,735 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-09T18:52:02,735 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-09T18:52:02,735 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b57a9c451f0886339e4ff0a85e749d9d, ASSIGN}] 2024-11-09T18:52:02,738 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b57a9c451f0886339e4ff0a85e749d9d, ASSIGN 2024-11-09T18:52:02,739 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b57a9c451f0886339e4ff0a85e749d9d, ASSIGN; state=OFFLINE, location=fb97eb0edbe8,33623,1731178321351; forceNewPlan=false, retain=false 2024-11-09T18:52:02,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T18:52:02,890 INFO [fb97eb0edbe8:45825 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-09T18:52:02,890 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b57a9c451f0886339e4ff0a85e749d9d, regionState=OPENING, regionLocation=fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:02,895 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b57a9c451f0886339e4ff0a85e749d9d, ASSIGN because future has completed 2024-11-09T18:52:02,895 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b57a9c451f0886339e4ff0a85e749d9d, server=fb97eb0edbe8,33623,1731178321351}] 2024-11-09T18:52:03,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T18:52:03,055 INFO [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 
2024-11-09T18:52:03,055 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b57a9c451f0886339e4ff0a85e749d9d, NAME => 'TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d.', STARTKEY => '', ENDKEY => ''} 2024-11-09T18:52:03,056 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,056 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-09T18:52:03,056 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,056 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,058 INFO [StoreOpener-b57a9c451f0886339e4ff0a85e749d9d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,060 INFO [StoreOpener-b57a9c451f0886339e4ff0a85e749d9d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b57a9c451f0886339e4ff0a85e749d9d columnFamilyName cf 2024-11-09T18:52:03,061 DEBUG [StoreOpener-b57a9c451f0886339e4ff0a85e749d9d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-09T18:52:03,061 INFO [StoreOpener-b57a9c451f0886339e4ff0a85e749d9d-1 {}] regionserver.HStore(327): Store=b57a9c451f0886339e4ff0a85e749d9d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-09T18:52:03,062 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,063 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/default/TestHBaseWalOnEC/b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,063 DEBUG 
[RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/default/TestHBaseWalOnEC/b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,064 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,064 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,067 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,070 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/default/TestHBaseWalOnEC/b57a9c451f0886339e4ff0a85e749d9d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-09T18:52:03,071 INFO [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b57a9c451f0886339e4ff0a85e749d9d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70829130, jitterRate=0.055436283349990845}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-09T18:52:03,071 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,072 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b57a9c451f0886339e4ff0a85e749d9d: Running coprocessor pre-open hook at 1731178323056Writing region info on filesystem at 1731178323056Initializing all the Stores at 1731178323057 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731178323058 (+1 ms)Cleaning up temporary data from old regions at 1731178323064 (+6 ms)Running coprocessor post-open hooks at 1731178323071 (+7 ms)Region opened successfully at 1731178323072 (+1 ms) 2024-11-09T18:52:03,074 INFO [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d., pid=6, masterSystemTime=1731178323050 2024-11-09T18:52:03,077 DEBUG [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 2024-11-09T18:52:03,077 INFO [RS_OPEN_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 
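
The CompactionConfiguration line above (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5) is simply the stock defaults echoed back at store-open time. A sketch of reading the same knobs from a Configuration; the property keys are the standard HBase names to the best of my knowledge and should be checked against the release in use:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionKnobs {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Defaults below match the values printed by CompactionConfiguration in the log.
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);                  // minFilesToCompact:3
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);                 // maxFilesToCompact:10
        float ratio = conf.getFloat("hbase.hstore.compaction.ratio", 1.2f);            // ratio 1.200000
        float offPeak = conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio 5.0
        long majorMs = conf.getLong("hbase.hregion.majorcompaction", 604800000L);      // 7 days, in ms
        float jitter = conf.getFloat("hbase.hregion.majorcompaction.jitter", 0.5f);    // major jitter 0.5
        System.out.printf("min=%d max=%d ratio=%.1f offpeak=%.1f major=%dms jitter=%.1f%n",
            minFiles, maxFiles, ratio, offPeak, majorMs, jitter);
      }
    }
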
2024-11-09T18:52:03,079 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b57a9c451f0886339e4ff0a85e749d9d, regionState=OPEN, openSeqNum=2, regionLocation=fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:03,082 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b57a9c451f0886339e4ff0a85e749d9d, server=fb97eb0edbe8,33623,1731178321351 because future has completed 2024-11-09T18:52:03,090 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-09T18:52:03,092 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b57a9c451f0886339e4ff0a85e749d9d, server=fb97eb0edbe8,33623,1731178321351 in 190 msec 2024-11-09T18:52:03,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-09T18:52:03,095 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b57a9c451f0886339e4ff0a85e749d9d, ASSIGN in 355 msec 2024-11-09T18:52:03,097 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-09T18:52:03,098 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731178323097"}]},"ts":"1731178323097"} 2024-11-09T18:52:03,102 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-09T18:52:03,104 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-09T18:52:03,107 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 413 msec 2024-11-09T18:52:03,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-09T18:52:03,322 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T18:52:03,322 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-09T18:52:03,322 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T18:52:03,325 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-09T18:52:03,326 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-09T18:52:03,326 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
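
The "Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms" sequence above corresponds to the test utility's assignment wait; a sketch, assuming the method name from the HBaseTestingUtility API carries over into HBaseTestingUtil:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;

    public class WaitForAssignment {
      // Blocks until every region of the table is assigned and visible in hbase:meta,
      // which is what the HBaseTestingUtil(3046)/(3100)/(3120) messages above report.
      static void waitFor(HBaseTestingUtil util) throws IOException {
        util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"), 60000);
      }
    }
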
2024-11-09T18:52:03,330 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d., hostname=fb97eb0edbe8,33623,1731178321351, seqNum=2] 2024-11-09T18:52:03,334 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-09T18:52:03,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-09T18:52:03,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T18:52:03,338 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-09T18:52:03,340 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-09T18:52:03,340 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-09T18:52:03,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T18:52:03,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33623 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-09T18:52:03,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 
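
The flush request above ("Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC") and the row/cf:cq cell that shows up in the flush output below correspond to a client write followed by an explicit flush; a minimal sketch (the value payload is a placeholder; only the row, family and qualifier names appear in the log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlush {
      static void putAndFlush(Connection conn) throws Exception {
        TableName tn = TableName.valueOf("TestHBaseWalOnEC");
        try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
          // Writes the cell whose key appears in the flush output below: row/cf:cq
          table.put(new Put(Bytes.toBytes("row"))
              .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
          // Drives the FlushTableProcedure (pid=7) / FlushRegionProcedure (pid=8) seen in the log.
          admin.flush(tn);
        }
      }
    }
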
2024-11-09T18:52:03,496 INFO [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing b57a9c451f0886339e4ff0a85e749d9d 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-09T18:52:03,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/default/TestHBaseWalOnEC/b57a9c451f0886339e4ff0a85e749d9d/.tmp/cf/9ddc4a5731314ee8b978ecaa861b9431 is 36, key is row/cf:cq/1731178323332/Put/seqid=0 2024-11-09T18:52:03,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741839_1015 (size=4787) 2024-11-09T18:52:03,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741839_1015 (size=4787) 2024-11-09T18:52:03,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741839_1015 (size=4787) 2024-11-09T18:52:03,522 INFO [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/default/TestHBaseWalOnEC/b57a9c451f0886339e4ff0a85e749d9d/.tmp/cf/9ddc4a5731314ee8b978ecaa861b9431 2024-11-09T18:52:03,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/default/TestHBaseWalOnEC/b57a9c451f0886339e4ff0a85e749d9d/.tmp/cf/9ddc4a5731314ee8b978ecaa861b9431 as hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/default/TestHBaseWalOnEC/b57a9c451f0886339e4ff0a85e749d9d/cf/9ddc4a5731314ee8b978ecaa861b9431 2024-11-09T18:52:03,540 INFO [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/default/TestHBaseWalOnEC/b57a9c451f0886339e4ff0a85e749d9d/cf/9ddc4a5731314ee8b978ecaa861b9431, entries=1, sequenceid=5, filesize=4.7 K 2024-11-09T18:52:03,542 INFO [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for b57a9c451f0886339e4ff0a85e749d9d in 45ms, sequenceid=5, compaction requested=false 2024-11-09T18:52:03,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for b57a9c451f0886339e4ff0a85e749d9d: 2024-11-09T18:52:03,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 
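
After the flush, the new store file (filesize=4.7 K; three replicas reported at size=4787) sits under the region's cf directory on the test HDFS. A sketch of listing it directly with the Hadoop FileSystem API, reusing the NameNode address and path from the log:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:34747"), conf);
        Path cfDir = new Path("/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/"
            + "data/default/TestHBaseWalOnEC/b57a9c451f0886339e4ff0a85e749d9d/cf");
        for (FileStatus st : fs.listStatus(cfDir)) {
          // Expect one entry, 9ddc4a5731314ee8b978ecaa861b9431, ~4787 bytes as reported above.
          System.out.println(st.getPath().getName() + " " + st.getLen());
        }
      }
    }
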
2024-11-09T18:52:03,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/fb97eb0edbe8:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-09T18:52:03,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-09T18:52:03,549 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-09T18:52:03,550 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 204 msec 2024-11-09T18:52:03,554 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 217 msec 2024-11-09T18:52:03,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45825 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-09T18:52:03,651 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-09T18:52:03,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-09T18:52:03,656 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-09T18:52:03,657 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T18:52:03,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:52:03,657 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:52:03,657 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-09T18:52:03,657 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=437850669, stopped=false 2024-11-09T18:52:03,657 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=fb97eb0edbe8,45825,1731178321182 2024-11-09T18:52:03,657 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
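
The call stack above shows the shutdown being driven from the test's teardown (TestHBaseWalOnEC.tearDown, line 101) via HBaseTestingUtil.shutdownMiniCluster. A sketch of the usual shape of such a teardown; the annotation and field name are inferred, not taken from the test source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class TearDownSketch {
      // Assumed to be the same utility instance that started the mini cluster.
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Stops the region servers, master, ZooKeeper and mini DFS, producing the
        // "Shutting down minicluster" and STOPPING lines in this part of the log.
        UTIL.shutdownMiniCluster();
      }
    }
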
2024-11-09T18:52:03,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T18:52:03,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T18:52:03,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T18:52:03,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-09T18:52:03,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:03,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:03,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:03,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:03,706 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T18:52:03,706 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
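
The NodeDeleted events for /hbase/running above are how shutdown is broadcast: the master deletes that znode and every watcher (the master's own and the three region servers') reacts. A standalone sketch of watching the same znode with the plain ZooKeeper client, taking the quorum address from the log; this is illustrative and is not HBase's ZKWatcher:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatcher {
      public static void main(String[] args) throws Exception {
        CountDownLatch deleted = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
          if (event.getType() == Watcher.Event.EventType.NodeDeleted
              && "/hbase/running".equals(event.getPath())) {
            deleted.countDown();   // cluster shutdown has been signalled
          }
        };
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57051", 30_000, watcher);
        zk.exists("/hbase/running", true);   // one-shot watch, like the ZKWatcher lines above
        deleted.await();
        zk.close();
      }
    }
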
2024-11-09T18:52:03,706 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:52:03,706 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:52:03,706 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:52:03,707 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at 
org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T18:52:03,707 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-09T18:52:03,707 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:52:03,707 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fb97eb0edbe8,33623,1731178321351' ***** 2024-11-09T18:52:03,707 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T18:52:03,707 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fb97eb0edbe8,35949,1731178321404' ***** 2024-11-09T18:52:03,707 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T18:52:03,707 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'fb97eb0edbe8,35061,1731178321440' ***** 2024-11-09T18:52:03,707 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-09T18:52:03,707 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T18:52:03,707 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T18:52:03,707 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T18:52:03,707 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T18:52:03,708 INFO [RS:0;fb97eb0edbe8:33623 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T18:52:03,708 INFO [RS:1;fb97eb0edbe8:35949 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T18:52:03,708 INFO [RS:0;fb97eb0edbe8:33623 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T18:52:03,708 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-09T18:52:03,708 INFO [RS:1;fb97eb0edbe8:35949 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-09T18:52:03,708 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(3091): Received CLOSE for b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,708 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer(959): stopping server fb97eb0edbe8,35949,1731178321404 2024-11-09T18:52:03,708 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T18:52:03,708 INFO [RS:2;fb97eb0edbe8:35061 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-09T18:52:03,708 INFO [RS:1;fb97eb0edbe8:35949 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;fb97eb0edbe8:35949. 2024-11-09T18:52:03,708 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-09T18:52:03,708 INFO [RS:2;fb97eb0edbe8:35061 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-09T18:52:03,708 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer(959): stopping server fb97eb0edbe8,35061,1731178321440 2024-11-09T18:52:03,708 DEBUG [RS:1;fb97eb0edbe8:35949 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T18:52:03,708 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T18:52:03,708 DEBUG [RS:1;fb97eb0edbe8:35949 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:52:03,708 INFO [RS:2;fb97eb0edbe8:35061 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;fb97eb0edbe8:35061. 2024-11-09T18:52:03,709 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer(976): stopping server fb97eb0edbe8,35949,1731178321404; all regions closed. 
2024-11-09T18:52:03,709 DEBUG [RS:2;fb97eb0edbe8:35061 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T18:52:03,709 DEBUG [RS:2;fb97eb0edbe8:35061 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:52:03,709 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer(976): stopping server fb97eb0edbe8,35061,1731178321440; all regions closed. 2024-11-09T18:52:03,709 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(959): stopping server fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:03,709 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T18:52:03,709 INFO [RS:0;fb97eb0edbe8:33623 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;fb97eb0edbe8:33623. 
2024-11-09T18:52:03,709 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,709 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,709 DEBUG [RS:0;fb97eb0edbe8:33623 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-09T18:52:03,709 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,709 DEBUG [RS:0;fb97eb0edbe8:33623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:52:03,709 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,709 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T18:52:03,709 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T18:52:03,709 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T18:52:03,710 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-09T18:52:03,710 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b57a9c451f0886339e4ff0a85e749d9d, disabling compactions & flushes 2024-11-09T18:52:03,710 INFO [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 2024-11-09T18:52:03,710 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 2024-11-09T18:52:03,710 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. after waiting 0 ms 2024-11-09T18:52:03,710 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 
2024-11-09T18:52:03,711 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,711 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-09T18:52:03,711 DEBUG [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(1325): Online Regions={b57a9c451f0886339e4ff0a85e749d9d=TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d., 1588230740=hbase:meta,,1.1588230740} 2024-11-09T18:52:03,711 DEBUG [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b57a9c451f0886339e4ff0a85e749d9d 2024-11-09T18:52:03,711 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,711 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,711 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,711 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-09T18:52:03,712 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-09T18:52:03,712 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,712 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-09T18:52:03,712 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,712 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-09T18:52:03,712 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-09T18:52:03,712 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-09T18:52:03,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741833_1009 (size=93) 2024-11-09T18:52:03,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741833_1009 (size=93) 2024-11-09T18:52:03,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741833_1009 (size=93) 2024-11-09T18:52:03,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741834_1010 (size=93) 2024-11-09T18:52:03,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741834_1010 (size=93) 2024-11-09T18:52:03,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741834_1010 (size=93) 2024-11-09T18:52:03,721 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/default/TestHBaseWalOnEC/b57a9c451f0886339e4ff0a85e749d9d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-09T18:52:03,721 
DEBUG [RS:1;fb97eb0edbe8:35949 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/oldWALs 2024-11-09T18:52:03,722 INFO [RS:1;fb97eb0edbe8:35949 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fb97eb0edbe8%2C35949%2C1731178321404:(num 1731178322196) 2024-11-09T18:52:03,722 DEBUG [RS:1;fb97eb0edbe8:35949 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:52:03,722 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T18:52:03,722 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T18:52:03,722 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.ChoreService(370): Chore service for: regionserver/fb97eb0edbe8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T18:52:03,722 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T18:52:03,722 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T18:52:03,722 DEBUG [RS:2;fb97eb0edbe8:35061 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/oldWALs 2024-11-09T18:52:03,722 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-09T18:52:03,722 INFO [RS:2;fb97eb0edbe8:35061 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fb97eb0edbe8%2C35061%2C1731178321440:(num 1731178322196) 2024-11-09T18:52:03,722 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T18:52:03,722 DEBUG [RS:2;fb97eb0edbe8:35061 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:52:03,722 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T18:52:03,722 INFO [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 
2024-11-09T18:52:03,723 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T18:52:03,723 INFO [RS:1;fb97eb0edbe8:35949 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35949 2024-11-09T18:52:03,723 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b57a9c451f0886339e4ff0a85e749d9d: Waiting for close lock at 1731178323710Running coprocessor pre-close hooks at 1731178323710Disabling compacts and flushes for region at 1731178323710Disabling writes for close at 1731178323710Writing region close event to WAL at 1731178323713 (+3 ms)Running coprocessor post-close hooks at 1731178323722 (+9 ms)Closed at 1731178323722 2024-11-09T18:52:03,723 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.ChoreService(370): Chore service for: regionserver/fb97eb0edbe8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-09T18:52:03,723 DEBUG [RS_CLOSE_REGION-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d. 2024-11-09T18:52:03,723 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-09T18:52:03,723 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-09T18:52:03,723 INFO [regionserver/fb97eb0edbe8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T18:52:03,723 INFO [regionserver/fb97eb0edbe8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T18:52:03,723 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-09T18:52:03,723 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T18:52:03,724 INFO [RS:2;fb97eb0edbe8:35061 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:35061 2024-11-09T18:52:03,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T18:52:03,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fb97eb0edbe8,35949,1731178321404 2024-11-09T18:52:03,732 INFO [RS:1;fb97eb0edbe8:35949 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T18:52:03,735 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/.tmp/info/502cc5bfd1f64b65aaaed7d9ba84e538 is 153, key is TestHBaseWalOnEC,,1731178322691.b57a9c451f0886339e4ff0a85e749d9d./info:regioninfo/1731178323078/Put/seqid=0 2024-11-09T18:52:03,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fb97eb0edbe8,35061,1731178321440 2024-11-09T18:52:03,742 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T18:52:03,742 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007f69c08f5140@3b33d911 rejected from java.util.concurrent.ThreadPoolExecutor@3f0c5aaf[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-09T18:52:03,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741840_1016 (size=6637) 2024-11-09T18:52:03,743 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741840_1016 (size=6637) 2024-11-09T18:52:03,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741840_1016 (size=6637) 2024-11-09T18:52:03,744 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/.tmp/info/502cc5bfd1f64b65aaaed7d9ba84e538 2024-11-09T18:52:03,753 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fb97eb0edbe8,35949,1731178321404] 2024-11-09T18:52:03,762 INFO [regionserver/fb97eb0edbe8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T18:52:03,766 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/.tmp/ns/1969ae0030ca4fd09a629c7bcc0059ec is 43, key is default/ns:d/1731178322615/Put/seqid=0 2024-11-09T18:52:03,768 WARN [IPC Server handler 0 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T18:52:03,768 WARN [IPC Server handler 0 on default port 34747 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T18:52:03,768 WARN [IPC Server handler 0 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T18:52:03,768 INFO [regionserver/fb97eb0edbe8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T18:52:03,768 INFO [regionserver/fb97eb0edbe8:0.leaseChecker {}] 
regionserver.LeaseManager(133): Closed leases 2024-11-09T18:52:03,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741841_1017 (size=5153) 2024-11-09T18:52:03,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741841_1017 (size=5153) 2024-11-09T18:52:03,774 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fb97eb0edbe8,35949,1731178321404 already deleted, retry=false 2024-11-09T18:52:03,774 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fb97eb0edbe8,35949,1731178321404 expired; onlineServers=2 2024-11-09T18:52:03,774 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fb97eb0edbe8,35061,1731178321440] 2024-11-09T18:52:03,774 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/.tmp/ns/1969ae0030ca4fd09a629c7bcc0059ec 2024-11-09T18:52:03,784 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fb97eb0edbe8,35061,1731178321440 already deleted, retry=false 2024-11-09T18:52:03,784 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fb97eb0edbe8,35061,1731178321440 expired; onlineServers=1 2024-11-09T18:52:03,798 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/.tmp/table/f69255685210467abdc1cc40b17e6516 is 52, key is TestHBaseWalOnEC/table:state/1731178323097/Put/seqid=0 2024-11-09T18:52:03,799 WARN [IPC Server handler 2 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T18:52:03,800 WARN [IPC Server handler 2 on default port 34747 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T18:52:03,800 WARN [IPC Server handler 2 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T18:52:03,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:38923 is added to blk_1073741842_1018 (size=5249) 2024-11-09T18:52:03,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741842_1018 (size=5249) 2024-11-09T18:52:03,806 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/.tmp/table/f69255685210467abdc1cc40b17e6516 2024-11-09T18:52:03,815 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/.tmp/info/502cc5bfd1f64b65aaaed7d9ba84e538 as hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/info/502cc5bfd1f64b65aaaed7d9ba84e538 2024-11-09T18:52:03,825 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/info/502cc5bfd1f64b65aaaed7d9ba84e538, entries=10, sequenceid=11, filesize=6.5 K 2024-11-09T18:52:03,827 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/.tmp/ns/1969ae0030ca4fd09a629c7bcc0059ec as hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/ns/1969ae0030ca4fd09a629c7bcc0059ec 2024-11-09T18:52:03,835 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/ns/1969ae0030ca4fd09a629c7bcc0059ec, entries=2, sequenceid=11, filesize=5.0 K 2024-11-09T18:52:03,837 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/.tmp/table/f69255685210467abdc1cc40b17e6516 as hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/table/f69255685210467abdc1cc40b17e6516 2024-11-09T18:52:03,846 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/table/f69255685210467abdc1cc40b17e6516, entries=2, sequenceid=11, filesize=5.1 K 2024-11-09T18:52:03,847 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false 2024-11-09T18:52:03,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:52:03,853 INFO 
[RS:1;fb97eb0edbe8:35949 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T18:52:03,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35949-0x10120f0d0b40002, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:52:03,853 INFO [RS:1;fb97eb0edbe8:35949 {}] regionserver.HRegionServer(1031): Exiting; stopping=fb97eb0edbe8,35949,1731178321404; zookeeper connection closed. 2024-11-09T18:52:03,853 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@48e64e06 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@48e64e06 2024-11-09T18:52:03,854 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-09T18:52:03,855 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-09T18:52:03,855 INFO [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-09T18:52:03,855 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731178323711Running coprocessor pre-close hooks at 1731178323711Disabling compacts and flushes for region at 1731178323711Disabling writes for close at 1731178323712 (+1 ms)Obtaining lock to block concurrent updates at 1731178323712Preparing flush snapshotting stores in 1588230740 at 1731178323712Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731178323713 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731178323714 (+1 ms)Flushing 1588230740/info: creating writer at 1731178323714Flushing 1588230740/info: appending metadata at 1731178323735 (+21 ms)Flushing 1588230740/info: closing flushed file at 1731178323735Flushing 1588230740/ns: creating writer at 1731178323751 (+16 ms)Flushing 1588230740/ns: appending metadata at 1731178323766 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731178323766Flushing 1588230740/table: creating writer at 1731178323782 (+16 ms)Flushing 1588230740/table: appending metadata at 1731178323797 (+15 ms)Flushing 1588230740/table: closing flushed file at 1731178323797Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@70d6aff4: reopening flushed file at 1731178323814 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@595cf3c7: reopening flushed file at 1731178323826 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@63312d33: reopening flushed file at 1731178323836 (+10 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false at 1731178323847 (+11 ms)Writing region close event to WAL at 1731178323849 (+2 ms)Running coprocessor post-close hooks at 1731178323855 (+6 ms)Closed at 1731178323855 2024-11-09T18:52:03,855 DEBUG [RS_CLOSE_META-regionserver/fb97eb0edbe8:0-0 {event_type=M_RS_CLOSE_META}] 
handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-09T18:52:03,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:52:03,863 INFO [RS:2;fb97eb0edbe8:35061 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T18:52:03,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35061-0x10120f0d0b40003, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:52:03,863 INFO [RS:2;fb97eb0edbe8:35061 {}] regionserver.HRegionServer(1031): Exiting; stopping=fb97eb0edbe8,35061,1731178321440; zookeeper connection closed. 2024-11-09T18:52:03,864 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@56364164 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@56364164 2024-11-09T18:52:03,911 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(976): stopping server fb97eb0edbe8,33623,1731178321351; all regions closed. 2024-11-09T18:52:03,912 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,912 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,912 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,912 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,912 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741836_1012 (size=2751) 2024-11-09T18:52:03,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741836_1012 (size=2751) 2024-11-09T18:52:03,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741836_1012 (size=2751) 2024-11-09T18:52:03,918 DEBUG [RS:0;fb97eb0edbe8:33623 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/oldWALs 2024-11-09T18:52:03,918 INFO [RS:0;fb97eb0edbe8:33623 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fb97eb0edbe8%2C33623%2C1731178321351.meta:.meta(num 1731178322555) 2024-11-09T18:52:03,919 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,919 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,919 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,919 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,919 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:03,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741835_1011 (size=1298) 2024-11-09T18:52:03,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741835_1011 (size=1298) 2024-11-09T18:52:03,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741835_1011 (size=1298) 2024-11-09T18:52:03,925 DEBUG [RS:0;fb97eb0edbe8:33623 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to 
/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/oldWALs 2024-11-09T18:52:03,925 INFO [RS:0;fb97eb0edbe8:33623 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog fb97eb0edbe8%2C33623%2C1731178321351:(num 1731178322200) 2024-11-09T18:52:03,925 DEBUG [RS:0;fb97eb0edbe8:33623 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-09T18:52:03,925 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.LeaseManager(133): Closed leases 2024-11-09T18:52:03,925 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T18:52:03,925 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.ChoreService(370): Chore service for: regionserver/fb97eb0edbe8:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-09T18:52:03,926 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T18:52:03,926 INFO [regionserver/fb97eb0edbe8:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-09T18:52:03,926 INFO [RS:0;fb97eb0edbe8:33623 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:33623 2024-11-09T18:52:03,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-09T18:52:03,937 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/fb97eb0edbe8,33623,1731178321351 2024-11-09T18:52:03,937 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T18:52:03,938 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [fb97eb0edbe8,33623,1731178321351] 2024-11-09T18:52:03,958 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/fb97eb0edbe8,33623,1731178321351 already deleted, retry=false 2024-11-09T18:52:03,958 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; fb97eb0edbe8,33623,1731178321351 expired; onlineServers=0 2024-11-09T18:52:03,958 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'fb97eb0edbe8,45825,1731178321182' ***** 2024-11-09T18:52:03,958 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-09T18:52:03,958 INFO [M:0;fb97eb0edbe8:45825 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-09T18:52:03,958 INFO [M:0;fb97eb0edbe8:45825 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-09T18:52:03,959 DEBUG [M:0;fb97eb0edbe8:45825 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-09T18:52:03,959 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-09T18:52:03,959 DEBUG [M:0;fb97eb0edbe8:45825 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-09T18:52:03,959 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.small.0-1731178321881 {}] cleaner.HFileCleaner(306): Exit Thread[master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.small.0-1731178321881,5,FailOnTimeoutGroup] 2024-11-09T18:52:03,959 DEBUG [master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.large.0-1731178321880 {}] cleaner.HFileCleaner(306): Exit Thread[master/fb97eb0edbe8:0:becomeActiveMaster-HFileCleaner.large.0-1731178321880,5,FailOnTimeoutGroup] 2024-11-09T18:52:03,959 INFO [M:0;fb97eb0edbe8:45825 {}] hbase.ChoreService(370): Chore service for: master/fb97eb0edbe8:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-09T18:52:03,959 INFO [M:0;fb97eb0edbe8:45825 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-09T18:52:03,959 DEBUG [M:0;fb97eb0edbe8:45825 {}] master.HMaster(1795): Stopping service threads 2024-11-09T18:52:03,959 INFO [M:0;fb97eb0edbe8:45825 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-09T18:52:03,959 INFO [M:0;fb97eb0edbe8:45825 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-09T18:52:03,960 INFO [M:0;fb97eb0edbe8:45825 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-09T18:52:03,960 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-09T18:52:03,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-09T18:52:03,969 DEBUG [M:0;fb97eb0edbe8:45825 {}] zookeeper.ZKUtil(347): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-09T18:52:03,969 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-09T18:52:03,969 WARN [M:0;fb97eb0edbe8:45825 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-09T18:52:03,970 INFO [M:0;fb97eb0edbe8:45825 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/.lastflushedseqids 2024-11-09T18:52:03,972 WARN [IPC Server handler 3 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T18:52:03,972 WARN [IPC Server handler 3 on default port 34747 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T18:52:03,972 WARN [IPC Server handler 3 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T18:52:03,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741843_1019 (size=127) 2024-11-09T18:52:03,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741843_1019 (size=127) 2024-11-09T18:52:03,981 INFO [M:0;fb97eb0edbe8:45825 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-09T18:52:03,981 INFO [M:0;fb97eb0edbe8:45825 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-09T18:52:03,982 DEBUG [M:0;fb97eb0edbe8:45825 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-09T18:52:03,982 INFO [M:0;fb97eb0edbe8:45825 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:52:03,982 DEBUG [M:0;fb97eb0edbe8:45825 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:52:03,982 DEBUG [M:0;fb97eb0edbe8:45825 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-09T18:52:03,982 DEBUG [M:0;fb97eb0edbe8:45825 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-09T18:52:03,982 INFO [M:0;fb97eb0edbe8:45825 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-09T18:52:04,008 DEBUG [M:0;fb97eb0edbe8:45825 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0ec096942f62495fa50525dc44313e4b is 82, key is hbase:meta,,1/info:regioninfo/1731178322589/Put/seqid=0 2024-11-09T18:52:04,010 WARN [IPC Server handler 1 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T18:52:04,010 WARN [IPC Server handler 1 on default port 34747 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T18:52:04,010 WARN [IPC Server handler 1 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T18:52:04,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741844_1020 (size=5672) 2024-11-09T18:52:04,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741844_1020 (size=5672) 2024-11-09T18:52:04,020 INFO [M:0;fb97eb0edbe8:45825 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0ec096942f62495fa50525dc44313e4b 2024-11-09T18:52:04,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:52:04,048 INFO [RS:0;fb97eb0edbe8:33623 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T18:52:04,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33623-0x10120f0d0b40001, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:52:04,048 INFO [RS:0;fb97eb0edbe8:33623 {}] regionserver.HRegionServer(1031): Exiting; stopping=fb97eb0edbe8,33623,1731178321351; zookeeper connection closed. 
2024-11-09T18:52:04,055 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@46c2e1d1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@46c2e1d1 2024-11-09T18:52:04,056 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-09T18:52:04,059 DEBUG [M:0;fb97eb0edbe8:45825 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/56bcb84db13448cbb8db088878717210 is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731178323106/Put/seqid=0 2024-11-09T18:52:04,060 WARN [IPC Server handler 0 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T18:52:04,060 WARN [IPC Server handler 0 on default port 34747 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T18:52:04,061 WARN [IPC Server handler 0 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T18:52:04,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741845_1021 (size=6440) 2024-11-09T18:52:04,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741845_1021 (size=6440) 2024-11-09T18:52:04,090 INFO [M:0;fb97eb0edbe8:45825 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/56bcb84db13448cbb8db088878717210 2024-11-09T18:52:04,128 DEBUG [M:0;fb97eb0edbe8:45825 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a1d227c4e7b84ce6b05dc30ccab4592c is 69, key is fb97eb0edbe8,33623,1731178321351/rs:state/1731178321954/Put/seqid=0 2024-11-09T18:52:04,130 WARN [IPC Server handler 3 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], 
creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-09T18:52:04,130 WARN [IPC Server handler 3 on default port 34747 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-09T18:52:04,130 WARN [IPC Server handler 3 on default port 34747 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-09T18:52:04,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741846_1022 (size=5294) 2024-11-09T18:52:04,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741846_1022 (size=5294) 2024-11-09T18:52:04,538 INFO [M:0;fb97eb0edbe8:45825 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a1d227c4e7b84ce6b05dc30ccab4592c 2024-11-09T18:52:04,546 DEBUG [M:0;fb97eb0edbe8:45825 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0ec096942f62495fa50525dc44313e4b as hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0ec096942f62495fa50525dc44313e4b 2024-11-09T18:52:04,555 INFO [M:0;fb97eb0edbe8:45825 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0ec096942f62495fa50525dc44313e4b, entries=8, sequenceid=72, filesize=5.5 K 2024-11-09T18:52:04,557 DEBUG [M:0;fb97eb0edbe8:45825 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/56bcb84db13448cbb8db088878717210 as hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/56bcb84db13448cbb8db088878717210 2024-11-09T18:52:04,568 INFO [M:0;fb97eb0edbe8:45825 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/56bcb84db13448cbb8db088878717210, entries=8, sequenceid=72, filesize=6.3 K 2024-11-09T18:52:04,569 DEBUG [M:0;fb97eb0edbe8:45825 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a1d227c4e7b84ce6b05dc30ccab4592c as hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a1d227c4e7b84ce6b05dc30ccab4592c 2024-11-09T18:52:04,578 INFO [M:0;fb97eb0edbe8:45825 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34747/user/jenkins/test-data/1585f722-4b97-7787-6eeb-cbb9aac09f23/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a1d227c4e7b84ce6b05dc30ccab4592c, entries=3, sequenceid=72, filesize=5.2 K 2024-11-09T18:52:04,580 INFO [M:0;fb97eb0edbe8:45825 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 598ms, sequenceid=72, compaction requested=false 2024-11-09T18:52:04,582 INFO [M:0;fb97eb0edbe8:45825 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-09T18:52:04,582 DEBUG [M:0;fb97eb0edbe8:45825 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731178323982Disabling compacts and flushes for region at 1731178323982Disabling writes for close at 1731178323982Obtaining lock to block concurrent updates at 1731178323982Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731178323982Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731178323983 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731178323984 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731178323984Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731178324007 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731178324007Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731178324029 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731178324058 (+29 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731178324058Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731178324108 (+50 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731178324128 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731178324128Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2463d278: reopening flushed file at 1731178324545 (+417 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4954e6d5: reopening flushed file at 1731178324555 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1c5b953e: reopening flushed file at 1731178324568 (+13 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 598ms, sequenceid=72, compaction requested=false at 1731178324580 (+12 ms)Writing region close event to WAL at 1731178324582 (+2 ms)Closed at 1731178324582 2024-11-09T18:52:04,582 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:04,583 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:04,583 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:04,583 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:04,583 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-09T18:52:04,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38923 is added to blk_1073741830_1006 (size=32686) 2024-11-09T18:52:04,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45411 is added to blk_1073741830_1006 (size=32686) 2024-11-09T18:52:04,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40859 is added to blk_1073741830_1006 (size=32686) 2024-11-09T18:52:04,988 INFO [M:0;fb97eb0edbe8:45825 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-09T18:52:04,988 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-09T18:52:04,988 INFO [M:0;fb97eb0edbe8:45825 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45825 2024-11-09T18:52:04,988 INFO [M:0;fb97eb0edbe8:45825 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-09T18:52:05,153 INFO [M:0;fb97eb0edbe8:45825 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-09T18:52:05,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:52:05,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45825-0x10120f0d0b40000, quorum=127.0.0.1:57051, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-09T18:52:05,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55a80442{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-09T18:52:05,157 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d3aadcd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-09T18:52:05,157 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-09T18:52:05,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2924ad14{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-09T18:52:05,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46e75277{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/hadoop.log.dir/,STOPPED} 2024-11-09T18:52:05,159 WARN [BP-18283849-172.17.0.3-1731178317626 heartbeating to localhost/127.0.0.1:34747 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-09T18:52:05,159 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-09T18:52:05,159 WARN [BP-18283849-172.17.0.3-1731178317626 heartbeating to localhost/127.0.0.1:34747 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-18283849-172.17.0.3-1731178317626 (Datanode Uuid 042ed729-ba26-4c21-ad70-1cb667b29c16) service to localhost/127.0.0.1:34747
2024-11-09T18:52:05,159 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-09T18:52:05,160 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data5/current/BP-18283849-172.17.0.3-1731178317626 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T18:52:05,160 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data6/current/BP-18283849-172.17.0.3-1731178317626 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T18:52:05,161 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-09T18:52:05,163 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@627fdc4a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-09T18:52:05,164 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1781e9e7{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-09T18:52:05,164 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-09T18:52:05,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@436754c8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-09T18:52:05,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@134b78f0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/hadoop.log.dir/,STOPPED}
2024-11-09T18:52:05,169 WARN [BP-18283849-172.17.0.3-1731178317626 heartbeating to localhost/127.0.0.1:34747 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-09T18:52:05,169 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-09T18:52:05,169 WARN [BP-18283849-172.17.0.3-1731178317626 heartbeating to localhost/127.0.0.1:34747 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-18283849-172.17.0.3-1731178317626 (Datanode Uuid 4d8a22e4-73ab-4a1a-9f5a-e95024b543ea) service to localhost/127.0.0.1:34747
2024-11-09T18:52:05,169 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-09T18:52:05,169 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data3/current/BP-18283849-172.17.0.3-1731178317626 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T18:52:05,170 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data4/current/BP-18283849-172.17.0.3-1731178317626 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T18:52:05,170 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-09T18:52:05,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ed60080{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-09T18:52:05,172 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1f9b01b{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-09T18:52:05,172 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-09T18:52:05,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@25117e55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-09T18:52:05,172 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51329d3a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/hadoop.log.dir/,STOPPED}
2024-11-09T18:52:05,173 WARN [BP-18283849-172.17.0.3-1731178317626 heartbeating to localhost/127.0.0.1:34747 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-09T18:52:05,173 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-09T18:52:05,174 WARN [BP-18283849-172.17.0.3-1731178317626 heartbeating to localhost/127.0.0.1:34747 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-18283849-172.17.0.3-1731178317626 (Datanode Uuid 393b14e2-e9cc-4e26-ac84-2f0c1a06ad1a) service to localhost/127.0.0.1:34747
2024-11-09T18:52:05,174 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-09T18:52:05,174 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data1/current/BP-18283849-172.17.0.3-1731178317626 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T18:52:05,174 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/cluster_d4319f6b-c8b1-57d0-2bc1-4ff0e010286c/data/data2/current/BP-18283849-172.17.0.3-1731178317626 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-09T18:52:05,175 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-09T18:52:05,181 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9f29b33{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-09T18:52:05,182 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a7ed972{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-09T18:52:05,182 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-09T18:52:05,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1537a16f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-09T18:52:05,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28c24cda{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/dbfd9d30-8d39-0cb7-dae5-fdbfbf2b5c04/hadoop.log.dir/,STOPPED}
2024-11-09T18:52:05,190 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-09T18:52:05,215 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-09T18:52:05,223 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=147 (was 87) - Thread LEAK? -, OpenFileDescriptor=518 (was 439) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=176 (was 165) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4800 (was 5039)