2024-12-11 17:43:20,166 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-11 17:43:20,180 main DEBUG Took 0.011604 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-11 17:43:20,181 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-11 17:43:20,181 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-11 17:43:20,182 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-11 17:43:20,184 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,195 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-11 17:43:20,218 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,219 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,220 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,221 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,221 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,222 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,223 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,223 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,224 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,225 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,225 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,226 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,227 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,227 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-11 17:43:20,228 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,228 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,229 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,229 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,230 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,230 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,231 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,231 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,232 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,232 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-11 17:43:20,233 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,233 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-11 17:43:20,235 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-11 17:43:20,236 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-11 17:43:20,238 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-11 17:43:20,239 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-11 17:43:20,241 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-11 17:43:20,241 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-11 17:43:20,252 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-11 17:43:20,255 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-11 17:43:20,257 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-11 17:43:20,257 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-11 17:43:20,258 main DEBUG createAppenders(={Console}) 2024-12-11 17:43:20,259 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-11 17:43:20,259 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-11 17:43:20,259 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-11 17:43:20,259 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-11 17:43:20,260 main DEBUG OutputStream closed 2024-12-11 17:43:20,260 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-11 17:43:20,260 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-11 17:43:20,260 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-11 17:43:20,363 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-11 17:43:20,366 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-11 17:43:20,367 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-11 17:43:20,369 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-11 17:43:20,370 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-11 17:43:20,370 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-11 17:43:20,371 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-11 17:43:20,371 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-11 17:43:20,372 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-11 17:43:20,372 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-11 17:43:20,373 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-11 17:43:20,373 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-11 17:43:20,374 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-11 17:43:20,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-11 17:43:20,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-11 17:43:20,376 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-11 17:43:20,376 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-11 17:43:20,377 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-11 17:43:20,381 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-11 17:43:20,382 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-11 17:43:20,382 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-11 17:43:20,384 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-11T17:43:20,403 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-11 17:43:20,407 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-11 17:43:20,407 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-11T17:43:20,749 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862 2024-12-11T17:43:20,780 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c, deleteOnExit=true 2024-12-11T17:43:20,781 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/test.cache.data in system properties and HBase conf 2024-12-11T17:43:20,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T17:43:20,783 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.log.dir in system properties and HBase conf 2024-12-11T17:43:20,784 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T17:43:20,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T17:43:20,785 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T17:43:20,946 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-11T17:43:21,088 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T17:43:21,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T17:43:21,094 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T17:43:21,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T17:43:21,095 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T17:43:21,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T17:43:21,096 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T17:43:21,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T17:43:21,097 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T17:43:21,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T17:43:21,098 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/nfs.dump.dir in system properties and HBase conf 2024-12-11T17:43:21,099 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/java.io.tmpdir in system properties and HBase conf 2024-12-11T17:43:21,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T17:43:21,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T17:43:21,101 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T17:43:22,283 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-11T17:43:22,364 INFO [Time-limited test {}] log.Log(170): Logging initialized @3048ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-11T17:43:22,434 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T17:43:22,493 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T17:43:22,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T17:43:22,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T17:43:22,521 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T17:43:22,534 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T17:43:22,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@346b353e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.log.dir/,AVAILABLE} 2024-12-11T17:43:22,539 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2566da3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T17:43:22,715 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44270346{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/java.io.tmpdir/jetty-localhost-41811-hadoop-hdfs-3_4_1-tests_jar-_-any-17434586940896606580/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T17:43:22,722 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11292817{HTTP/1.1, (http/1.1)}{localhost:41811} 2024-12-11T17:43:22,723 INFO [Time-limited test {}] server.Server(415): Started @3407ms 2024-12-11T17:43:23,350 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T17:43:23,356 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T17:43:23,357 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T17:43:23,357 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T17:43:23,357 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T17:43:23,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5a55babc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.log.dir/,AVAILABLE} 2024-12-11T17:43:23,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e9e5394{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T17:43:23,456 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ec777b6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/java.io.tmpdir/jetty-localhost-45897-hadoop-hdfs-3_4_1-tests_jar-_-any-17183012695525091058/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T17:43:23,457 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@145f251e{HTTP/1.1, (http/1.1)}{localhost:45897} 2024-12-11T17:43:23,457 INFO [Time-limited test {}] server.Server(415): Started @4142ms 2024-12-11T17:43:23,509 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T17:43:23,614 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T17:43:23,619 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T17:43:23,623 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T17:43:23,624 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T17:43:23,624 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T17:43:23,627 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@56f2bf79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.log.dir/,AVAILABLE} 2024-12-11T17:43:23,628 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19093484{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T17:43:23,771 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1327a94d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/java.io.tmpdir/jetty-localhost-41657-hadoop-hdfs-3_4_1-tests_jar-_-any-9920392727471530754/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T17:43:23,772 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@674554fc{HTTP/1.1, (http/1.1)}{localhost:41657} 2024-12-11T17:43:23,773 INFO [Time-limited test {}] server.Server(415): Started @4458ms 2024-12-11T17:43:23,776 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T17:43:23,816 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T17:43:23,821 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T17:43:23,823 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T17:43:23,823 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T17:43:23,823 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T17:43:23,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1646e48a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.log.dir/,AVAILABLE} 2024-12-11T17:43:23,824 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3891561d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T17:43:23,926 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@554ba3d5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/java.io.tmpdir/jetty-localhost-37267-hadoop-hdfs-3_4_1-tests_jar-_-any-15202211836510036760/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T17:43:23,927 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@64a37729{HTTP/1.1, (http/1.1)}{localhost:37267} 2024-12-11T17:43:23,928 INFO [Time-limited test {}] server.Server(415): Started @4612ms 2024-12-11T17:43:23,930 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-11T17:43:25,063 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data1/current/BP-1719711233-172.17.0.2-1733939001831/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:25,063 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data4/current/BP-1719711233-172.17.0.2-1733939001831/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:25,063 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data3/current/BP-1719711233-172.17.0.2-1733939001831/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:25,063 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data2/current/BP-1719711233-172.17.0.2-1733939001831/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:25,102 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T17:43:25,105 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-11T17:43:25,148 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5fd4db0a8f90d307 with lease ID 0xb7962ab9d2cd830e: Processing first storage report for DS-867e9432-bbc8-479e-971b-c9cb96ab3687 from datanode DatanodeRegistration(127.0.0.1:34485, datanodeUuid=318ea0c6-a00f-47ab-827b-a89609a8569a, infoPort=45825, infoSecurePort=0, ipcPort=34895, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831) 2024-12-11T17:43:25,150 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5fd4db0a8f90d307 with lease ID 0xb7962ab9d2cd830e: from storage DS-867e9432-bbc8-479e-971b-c9cb96ab3687 node DatanodeRegistration(127.0.0.1:34485, datanodeUuid=318ea0c6-a00f-47ab-827b-a89609a8569a, infoPort=45825, infoSecurePort=0, ipcPort=34895, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-11T17:43:25,150 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4fc3d09bc3f4b05 with lease ID 0xb7962ab9d2cd830d: Processing first storage report for DS-581b03ce-375e-4b02-b0ce-172ebf015187 from datanode DatanodeRegistration(127.0.0.1:40997, datanodeUuid=67c57c5e-8889-44b3-ae38-f932490a6922, infoPort=37283, infoSecurePort=0, ipcPort=37393, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831) 2024-12-11T17:43:25,150 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd4fc3d09bc3f4b05 with lease ID 0xb7962ab9d2cd830d: from storage DS-581b03ce-375e-4b02-b0ce-172ebf015187 node DatanodeRegistration(127.0.0.1:40997, datanodeUuid=67c57c5e-8889-44b3-ae38-f932490a6922, infoPort=37283, infoSecurePort=0, ipcPort=37393, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T17:43:25,150 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5fd4db0a8f90d307 with lease ID 0xb7962ab9d2cd830e: Processing first storage report for DS-7af123a5-276c-4942-ad2c-9782736242e0 from datanode DatanodeRegistration(127.0.0.1:34485, datanodeUuid=318ea0c6-a00f-47ab-827b-a89609a8569a, infoPort=45825, infoSecurePort=0, ipcPort=34895, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831) 2024-12-11T17:43:25,151 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5fd4db0a8f90d307 with lease ID 0xb7962ab9d2cd830e: from storage DS-7af123a5-276c-4942-ad2c-9782736242e0 node DatanodeRegistration(127.0.0.1:34485, datanodeUuid=318ea0c6-a00f-47ab-827b-a89609a8569a, infoPort=45825, infoSecurePort=0, ipcPort=34895, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T17:43:25,151 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd4fc3d09bc3f4b05 with lease ID 0xb7962ab9d2cd830d: Processing first storage report for DS-dcca77e2-4fd6-4e66-8a84-66cde08bfe08 from datanode DatanodeRegistration(127.0.0.1:40997, datanodeUuid=67c57c5e-8889-44b3-ae38-f932490a6922, infoPort=37283, infoSecurePort=0, ipcPort=37393, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831) 2024-12-11T17:43:25,151 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xd4fc3d09bc3f4b05 with lease ID 0xb7962ab9d2cd830d: from storage DS-dcca77e2-4fd6-4e66-8a84-66cde08bfe08 node DatanodeRegistration(127.0.0.1:40997, datanodeUuid=67c57c5e-8889-44b3-ae38-f932490a6922, infoPort=37283, infoSecurePort=0, ipcPort=37393, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T17:43:25,173 WARN [Thread-142 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data5/current/BP-1719711233-172.17.0.2-1733939001831/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:25,174 WARN [Thread-143 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data6/current/BP-1719711233-172.17.0.2-1733939001831/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:25,195 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T17:43:25,205 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe647c40da8422b02 with lease ID 0xb7962ab9d2cd830f: Processing first storage report for DS-6c88c14c-8541-496c-ac56-a988dea8456d from datanode DatanodeRegistration(127.0.0.1:34533, datanodeUuid=7c15638e-fa93-42a2-8967-150d2dbd251b, infoPort=38503, infoSecurePort=0, ipcPort=33887, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831) 2024-12-11T17:43:25,205 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe647c40da8422b02 with lease ID 0xb7962ab9d2cd830f: from storage DS-6c88c14c-8541-496c-ac56-a988dea8456d node DatanodeRegistration(127.0.0.1:34533, datanodeUuid=7c15638e-fa93-42a2-8967-150d2dbd251b, infoPort=38503, infoSecurePort=0, ipcPort=33887, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T17:43:25,206 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe647c40da8422b02 with lease ID 0xb7962ab9d2cd830f: Processing first storage report for DS-d222d10c-227b-4839-994a-28667dea3799 from datanode DatanodeRegistration(127.0.0.1:34533, datanodeUuid=7c15638e-fa93-42a2-8967-150d2dbd251b, infoPort=38503, infoSecurePort=0, ipcPort=33887, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831) 2024-12-11T17:43:25,206 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe647c40da8422b02 with lease ID 0xb7962ab9d2cd830f: from storage DS-d222d10c-227b-4839-994a-28667dea3799 node DatanodeRegistration(127.0.0.1:34533, datanodeUuid=7c15638e-fa93-42a2-8967-150d2dbd251b, infoPort=38503, infoSecurePort=0, ipcPort=33887, storageInfo=lv=-57;cid=testClusterID;nsid=733946966;c=1733939001831), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T17:43:25,293 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862 2024-12-11T17:43:25,376 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable 2024-12-11T17:43:25,440 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=156, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=647, ProcessCount=11, AvailableMemoryMB=2975 2024-12-11T17:43:25,443 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T17:43:25,452 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-12-11T17:43:25,560 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/zookeeper_0, clientPort=52280, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T17:43:25,568 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52280 2024-12-11T17:43:25,576 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:25,578 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:25,675 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:25,675 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-11T17:43:25,725 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:35392 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:40997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35392 dst: /127.0.0.1:40997 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:25,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_-9223372036854775792_1002 (size=7) 2024-12-11T17:43:26,145 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-11T17:43:26,158 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962 with version=8 2024-12-11T17:43:26,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/hbase-staging 2024-12-11T17:43:26,258 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-11T17:43:26,477 INFO [Time-limited test {}] client.ConnectionUtils(128): master/75744186b12a:0 server-side Connection retries=45 2024-12-11T17:43:26,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:26,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:26,490 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T17:43:26,491 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:26,491 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T17:43:26,644 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-11T17:43:26,731 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-11T17:43:26,744 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-11T17:43:26,750 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T17:43:26,785 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 6996 (auto-detected) 2024-12-11T17:43:26,786 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-11T17:43:26,809 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33505 2024-12-11T17:43:26,830 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33505 connecting to ZooKeeper ensemble=127.0.0.1:52280 2024-12-11T17:43:26,909 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:335050x0, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T17:43:26,913 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33505-0x100160747060000 connected 2024-12-11T17:43:27,019 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:27,022 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:27,032 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:27,036 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962, hbase.cluster.distributed=false 2024-12-11T17:43:27,063 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T17:43:27,067 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33505 2024-12-11T17:43:27,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33505 2024-12-11T17:43:27,068 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33505 2024-12-11T17:43:27,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33505 2024-12-11T17:43:27,069 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33505 2024-12-11T17:43:27,163 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/75744186b12a:0 server-side Connection retries=45 2024-12-11T17:43:27,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:27,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:27,165 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T17:43:27,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:27,165 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T17:43:27,168 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T17:43:27,170 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T17:43:27,171 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38729 2024-12-11T17:43:27,172 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38729 connecting to ZooKeeper ensemble=127.0.0.1:52280 2024-12-11T17:43:27,173 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:27,178 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:27,213 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:387290x0, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T17:43:27,215 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38729-0x100160747060001 connected 2024-12-11T17:43:27,215 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:27,221 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T17:43:27,229 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T17:43:27,232 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T17:43:27,239 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T17:43:27,243 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38729 2024-12-11T17:43:27,245 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38729 2024-12-11T17:43:27,247 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38729 2024-12-11T17:43:27,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38729 2024-12-11T17:43:27,248 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38729 2024-12-11T17:43:27,263 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/75744186b12a:0 server-side Connection retries=45 2024-12-11T17:43:27,263 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:27,263 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:27,264 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T17:43:27,264 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:27,264 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T17:43:27,264 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T17:43:27,265 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T17:43:27,265 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40257 2024-12-11T17:43:27,267 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40257 connecting to ZooKeeper ensemble=127.0.0.1:52280 2024-12-11T17:43:27,268 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:27,272 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:27,292 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:402570x0, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T17:43:27,293 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40257-0x100160747060002 connected 2024-12-11T17:43:27,293 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:402570x0, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:27,294 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T17:43:27,295 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T17:43:27,297 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T17:43:27,299 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T17:43:27,300 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40257 2024-12-11T17:43:27,300 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40257 2024-12-11T17:43:27,301 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40257 2024-12-11T17:43:27,301 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40257 2024-12-11T17:43:27,302 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40257 2024-12-11T17:43:27,320 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/75744186b12a:0 server-side Connection retries=45 2024-12-11T17:43:27,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:27,320 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:27,321 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T17:43:27,321 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:27,321 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T17:43:27,321 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T17:43:27,322 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T17:43:27,323 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36797 2024-12-11T17:43:27,324 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36797 connecting to ZooKeeper ensemble=127.0.0.1:52280 2024-12-11T17:43:27,326 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:27,329 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:27,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:367970x0, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T17:43:27,342 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:367970x0, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:27,343 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36797-0x100160747060003 connected 2024-12-11T17:43:27,343 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T17:43:27,344 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T17:43:27,345 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T17:43:27,346 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T17:43:27,347 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36797 2024-12-11T17:43:27,347 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36797 2024-12-11T17:43:27,348 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36797 2024-12-11T17:43:27,349 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36797 2024-12-11T17:43:27,349 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36797 2024-12-11T17:43:27,362 DEBUG [M:0;75744186b12a:33505 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;75744186b12a:33505 2024-12-11T17:43:27,363 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/75744186b12a,33505,1733939006308 2024-12-11T17:43:27,376 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:27,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:27,376 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:27,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:27,378 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/75744186b12a,33505,1733939006308 2024-12-11T17:43:27,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:27,409 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T17:43:27,409 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:27,409 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, 
quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T17:43:27,409 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:27,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T17:43:27,410 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:27,411 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T17:43:27,412 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/75744186b12a,33505,1733939006308 from backup master directory 2024-12-11T17:43:27,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:27,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/75744186b12a,33505,1733939006308 2024-12-11T17:43:27,425 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:27,425 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:27,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:27,427 WARN [master/75744186b12a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
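Editor's note: the "Set watcher on znode that does not yet exist" lines above, followed by the NodeCreated events for /hbase/master, show the usual ZooKeeper pattern of arming a watch on an absent znode so each server learns when the active master registers itself. A minimal sketch of that pattern using the plain ZooKeeper client API rather than HBase's ZKWatcher/ZKUtil wrappers; the ensemble address copies this test run's 127.0.0.1:52280 and would differ anywhere else.

import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeCreationWatch {
  public static void main(String[] args) throws Exception {
    // Ensemble address taken from this test run; the port is ephemeral.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:52280", 30_000, event -> { });

    String path = "/hbase/master";
    // exists() registers the watch even when the znode is absent; this is the
    // "Set watcher on znode that does not yet exist" step in the log.
    Stat stat = zk.exists(path, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("NodeCreated fired for " + event.getPath());
      }
    });
    System.out.println(path + (stat == null ? " absent, watch armed" : " already exists"));

    Thread.sleep(60_000); // keep the session open long enough to observe the event
    zk.close();
  }
}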
2024-12-11T17:43:27,427 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=75744186b12a,33505,1733939006308 2024-12-11T17:43:27,429 INFO [master/75744186b12a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-11T17:43:27,431 INFO [master/75744186b12a:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-11T17:43:27,500 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/hbase.id] with ID: beea206b-0fd5-4cc0-a32a-451428069263 2024-12-11T17:43:27,500 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/.tmp/hbase.id 2024-12-11T17:43:27,512 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:27,512 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:27,516 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:33402 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:34485:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33402 dst: /127.0.0.1:34485 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
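Editor's note: the FSUtils lines above write the cluster ID to a .tmp file first and only then move it to hbase.id. A minimal sketch of that write-then-rename idiom with the Hadoop FileSystem API; the NameNode URI and cluster ID are taken from the log, while the shortened paths are simplified stand-ins for illustration only.

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:45685"), conf);

    // Simplified stand-ins for the test-data paths in the log.
    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");
    Path target = new Path("/user/jenkins/test-data/hbase.id");

    // 1. Write the ID to a temporary location.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("beea206b-0fd5-4cc0-a32a-451428069263".getBytes(StandardCharsets.UTF_8));
    }
    // 2. Move it to its final name so readers never observe a half-written file.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
    }
  }
}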
2024-12-11T17:43:27,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-11T17:43:27,526 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:27,526 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/.tmp/hbase.id]:[hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/hbase.id] 2024-12-11T17:43:27,577 INFO [master/75744186b12a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:27,581 INFO [master/75744186b12a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T17:43:27,603 INFO [master/75744186b12a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-12-11T17:43:27,617 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:27,617 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:27,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:27,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:27,629 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:27,629 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:27,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:44276 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:34533:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:44276 dst: /127.0.0.1:34533 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:27,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-11T17:43:27,640 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:27,656 INFO [master/75744186b12a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T17:43:27,659 INFO [master/75744186b12a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T17:43:27,665 INFO [master/75744186b12a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T17:43:27,698 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:27,699 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:27,703 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:35418 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:40997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35418 dst: /127.0.0.1:40997 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:27,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-11T17:43:28,114 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
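Editor's note: the repeated "Cannot allocate parity block ... policy=RS-3-2-1024k" warnings arise because RS-3-2 needs 3 data plus 2 parity targets, i.e. at least 5 datanodes, while this mini-cluster appears to expose only 3 (127.0.0.1:34485, :34533, :40997). Besides the 'hdfs ec -verifyClusterSetup' command the warning suggests, a similar check can be approximated programmatically; a sketch under the assumption that the placeholder path below carries the striped policy.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcCapacityCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:45685"), conf);

    // Path is a placeholder for the directory being written with the striped policy.
    ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(new Path("/user/jenkins/test-data"));
    DatanodeInfo[] liveNodes = dfs.getDataNodeStats();

    if (policy == null) {
      System.out.println("path uses plain replication, no EC policy set");
    } else {
      int needed = policy.getNumDataUnits() + policy.getNumParityUnits(); // RS-3-2 -> 3 + 2 = 5
      System.out.printf("policy %s needs %d datanodes; cluster reports %d live%n",
          policy.getName(), needed, liveNodes.length);
    }
  }
}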
2024-12-11T17:43:28,158 INFO [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store 2024-12-11T17:43:28,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-11T17:43:28,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-11T17:43:28,187 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:28,187 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:28,229 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:35452 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35452 dst: /127.0.0.1:40997 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:28,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-11T17:43:28,263 WARN [master/75744186b12a:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:28,269 INFO [master/75744186b12a:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-11T17:43:28,273 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:28,275 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T17:43:28,275 INFO [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T17:43:28,276 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T17:43:28,278 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T17:43:28,278 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T17:43:28,279 INFO [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
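Editor's note: the StoreHotnessProtector message above names the switch that controls it, hbase.region.store.parallel.put.limit, which is 0 (disabled) in this run. A minimal sketch of setting it programmatically; in a real deployment the property would normally be placed in hbase-site.xml before the regionservers start, and the value 10 is an arbitrary example rather than a recommendation.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HotnessProtectorConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // 0 (the default seen in the log) disables the protector; any value > 0 enables it.
    conf.setInt("hbase.region.store.parallel.put.limit", 10);
    System.out.println("parallel put limit = "
        + conf.getInt("hbase.region.store.parallel.put.limit", 0));
  }
}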
2024-12-11T17:43:28,281 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733939008275Disabling compacts and flushes for region at 1733939008275Disabling writes for close at 1733939008278 (+3 ms)Writing region close event to WAL at 1733939008278Closed at 1733939008279 (+1 ms) 2024-12-11T17:43:28,284 WARN [master/75744186b12a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/.initializing 2024-12-11T17:43:28,284 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/WALs/75744186b12a,33505,1733939006308 2024-12-11T17:43:28,298 INFO [master/75744186b12a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T17:43:28,321 INFO [master/75744186b12a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=75744186b12a%2C33505%2C1733939006308, suffix=, logDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/WALs/75744186b12a,33505,1733939006308, archiveDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/oldWALs, maxLogs=10 2024-12-11T17:43:28,364 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/WALs/75744186b12a,33505,1733939006308/75744186b12a%2C33505%2C1733939006308.1733939008327, exclude list is [], retry=0 2024-12-11T17:43:28,398 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:28,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34485,DS-867e9432-bbc8-479e-971b-c9cb96ab3687,DISK] 2024-12-11T17:43:28,399 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34533,DS-6c88c14c-8541-496c-ac56-a988dea8456d,DISK] 2024-12-11T17:43:28,400 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40997,DS-581b03ce-375e-4b02-b0ce-172ebf015187,DISK] 2024-12-11T17:43:28,404 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-11T17:43:28,475 INFO [master/75744186b12a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/WALs/75744186b12a,33505,1733939006308/75744186b12a%2C33505%2C1733939006308.1733939008327 2024-12-11T17:43:28,480 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37283:37283),(127.0.0.1/127.0.0.1:45825:45825),(127.0.0.1/127.0.0.1:38503:38503)] 2024-12-11T17:43:28,481 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T17:43:28,482 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:28,488 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,489 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,593 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T17:43:28,598 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:28,605 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:28,606 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T17:43:28,613 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:28,615 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T17:43:28,615 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,619 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T17:43:28,619 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:28,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T17:43:28,622 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,627 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T17:43:28,627 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:28,629 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T17:43:28,630 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,636 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,638 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,648 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,649 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,654 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T17:43:28,662 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:28,691 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T17:43:28,694 INFO [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64010600, jitterRate=-0.046167731285095215}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T17:43:28,704 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733939008507Initializing all the Stores at 1733939008518 (+11 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939008518Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939008523 (+5 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939008523Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939008523Cleaning up temporary data from old regions at 1733939008649 (+126 ms)Region opened successfully at 1733939008704 (+55 ms) 2024-12-11T17:43:28,707 INFO [master/75744186b12a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T17:43:28,758 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44dc3bd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=75744186b12a/172.17.0.2:0 2024-12-11T17:43:28,804 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-11T17:43:28,822 INFO [master/75744186b12a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T17:43:28,822 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T17:43:28,828 INFO [master/75744186b12a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T17:43:28,829 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-11T17:43:28,836 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-12-11T17:43:28,837 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T17:43:28,880 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-11T17:43:28,893 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T17:43:28,955 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T17:43:28,959 INFO [master/75744186b12a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T17:43:28,961 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T17:43:28,971 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T17:43:28,975 INFO [master/75744186b12a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T17:43:28,984 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T17:43:28,996 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T17:43:29,003 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T17:43:29,013 DEBUG [master/75744186b12a:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T17:43:29,040 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T17:43:29,049 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T17:43:29,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:29,063 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:29,064 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:29,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:29,064 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:29,064 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:29,064 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:29,067 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:29,074 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=75744186b12a,33505,1733939006308, sessionid=0x100160747060000, setting cluster-up flag (Was=false) 2024-12-11T17:43:29,108 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:29,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:29,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
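Editor's note: the earlier ProcedureExecutor line ("Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50") states the default sizing rule for procedure worker threads; the 5 seen in this run suggests the test overrides that default. A tiny plain-Java sketch of the arithmetic in the parenthetical, as an illustration rather than HBase's actual implementation.

public class ProcedureWorkerSizing {
  public static void main(String[] args) {
    int cpus = Runtime.getRuntime().availableProcessors();
    // "bigger of cpus/4 or 16": scale with the machine but never drop below 16 core workers.
    int defaultCoreWorkers = Math.max(cpus / 4, 16);
    int burstCeiling = 50; // the "max (burst) worker count" reported in the log
    System.out.println("cpus=" + cpus
        + " -> default core workers=" + defaultCoreWorkers
        + ", burst ceiling=" + burstCeiling);
  }
}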
2024-12-11T17:43:29,108 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:29,133 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T17:43:29,136 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=75744186b12a,33505,1733939006308 2024-12-11T17:43:29,158 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:29,158 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:29,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:29,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:29,183 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T17:43:29,186 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=75744186b12a,33505,1733939006308 2024-12-11T17:43:29,196 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T17:43:29,256 INFO [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer(746): ClusterId : beea206b-0fd5-4cc0-a32a-451428069263 2024-12-11T17:43:29,259 DEBUG [RS:0;75744186b12a:38729 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T17:43:29,260 INFO [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer(746): ClusterId : beea206b-0fd5-4cc0-a32a-451428069263 2024-12-11T17:43:29,260 DEBUG [RS:2;75744186b12a:36797 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T17:43:29,275 INFO [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(746): ClusterId : beea206b-0fd5-4cc0-a32a-451428069263 2024-12-11T17:43:29,275 DEBUG [RS:1;75744186b12a:40257 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T17:43:29,290 DEBUG [RS:2;75744186b12a:36797 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T17:43:29,290 DEBUG [RS:2;75744186b12a:36797 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot 
initializing 2024-12-11T17:43:29,290 DEBUG [RS:0;75744186b12a:38729 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T17:43:29,291 DEBUG [RS:0;75744186b12a:38729 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T17:43:29,311 DEBUG [RS:1;75744186b12a:40257 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T17:43:29,311 DEBUG [RS:1;75744186b12a:40257 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T17:43:29,313 DEBUG [RS:0;75744186b12a:38729 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T17:43:29,313 DEBUG [RS:2;75744186b12a:36797 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T17:43:29,314 DEBUG [RS:0;75744186b12a:38729 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2f28abee, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=75744186b12a/172.17.0.2:0 2024-12-11T17:43:29,314 DEBUG [RS:2;75744186b12a:36797 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5805d43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=75744186b12a/172.17.0.2:0 2024-12-11T17:43:29,324 DEBUG [RS:1;75744186b12a:40257 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T17:43:29,324 DEBUG [RS:1;75744186b12a:40257 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a9a5e25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=75744186b12a/172.17.0.2:0 2024-12-11T17:43:29,331 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T17:43:29,337 DEBUG [RS:2;75744186b12a:36797 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;75744186b12a:36797 2024-12-11T17:43:29,344 DEBUG [RS:0;75744186b12a:38729 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;75744186b12a:38729 2024-12-11T17:43:29,349 DEBUG [RS:1;75744186b12a:40257 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;75744186b12a:40257 2024-12-11T17:43:29,352 INFO [master/75744186b12a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T17:43:29,361 INFO [RS:2;75744186b12a:36797 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T17:43:29,362 INFO [RS:2;75744186b12a:36797 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T17:43:29,362 DEBUG [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer(832): About to register with Master. 
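The AbstractRpcClient entries above show each region server's client-side RPC socket settings (connectTO=10000, readTO=20000, writeTO=60000, maxRetries=0, fallbackAllowed=true). Below is a minimal sketch of tuning those timeouts before opening a connection; the key names (hbase.ipc.client.socket.timeout.connect/read/write) are an assumption inferred from the values printed above and should be verified against the HBase release in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RpcClientTimeouts {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key names; the values mirror the connectTO/readTO/writeTO
        // figures printed by AbstractRpcClient in the log above (milliseconds).
        conf.setInt("hbase.ipc.client.socket.timeout.connect", 10000);
        conf.setInt("hbase.ipc.client.socket.timeout.read", 20000);
        conf.setInt("hbase.ipc.client.socket.timeout.write", 60000);
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          System.out.println("connected: " + !connection.isClosed());
        }
      }
    }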
2024-12-11T17:43:29,361 INFO [RS:1;75744186b12a:40257 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T17:43:29,362 INFO [RS:1;75744186b12a:40257 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T17:43:29,362 DEBUG [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T17:43:29,367 INFO [master/75744186b12a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-12-11T17:43:29,366 INFO [RS:0;75744186b12a:38729 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T17:43:29,368 INFO [RS:0;75744186b12a:38729 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T17:43:29,368 DEBUG [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T17:43:29,371 INFO [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer(2659): reportForDuty to master=75744186b12a,33505,1733939006308 with port=36797, startcode=1733939007319 2024-12-11T17:43:29,371 INFO [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(2659): reportForDuty to master=75744186b12a,33505,1733939006308 with port=40257, startcode=1733939007263 2024-12-11T17:43:29,372 INFO [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer(2659): reportForDuty to master=75744186b12a,33505,1733939006308 with port=38729, startcode=1733939007130 2024-12-11T17:43:29,382 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 75744186b12a,33505,1733939006308 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T17:43:29,390 DEBUG [RS:2;75744186b12a:36797 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T17:43:29,391 DEBUG [RS:1;75744186b12a:40257 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T17:43:29,391 DEBUG [RS:0;75744186b12a:38729 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T17:43:29,402 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/75744186b12a:0, corePoolSize=5, maxPoolSize=5 2024-12-11T17:43:29,403 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/75744186b12a:0, corePoolSize=5, maxPoolSize=5 2024-12-11T17:43:29,403 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/75744186b12a:0, 
corePoolSize=5, maxPoolSize=5 2024-12-11T17:43:29,403 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/75744186b12a:0, corePoolSize=5, maxPoolSize=5 2024-12-11T17:43:29,403 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/75744186b12a:0, corePoolSize=10, maxPoolSize=10 2024-12-11T17:43:29,404 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,404 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/75744186b12a:0, corePoolSize=2, maxPoolSize=2 2024-12-11T17:43:29,404 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,452 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733939039452 2024-12-11T17:43:29,453 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T17:43:29,453 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T17:43:29,454 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T17:43:29,457 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T17:43:29,462 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T17:43:29,463 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T17:43:29,463 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T17:43:29,463 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T17:43:29,467 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:29,467 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, 
{NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T17:43:29,467 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,503 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T17:43:29,514 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T17:43:29,515 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T17:43:29,523 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T17:43:29,524 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T17:43:29,528 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55239, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T17:43:29,528 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53343, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T17:43:29,531 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/75744186b12a:0:becomeActiveMaster-HFileCleaner.large.0-1733939009525,5,FailOnTimeoutGroup] 2024-12-11T17:43:29,529 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49535, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T17:43:29,538 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:29,539 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:29,551 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/75744186b12a:0:becomeActiveMaster-HFileCleaner.small.0-1733939009531,5,FailOnTimeoutGroup] 2024-12-11T17:43:29,551 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,552 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T17:43:29,553 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,554 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,577 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33505 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 75744186b12a,36797,1733939007319 2024-12-11T17:43:29,580 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33505 {}] master.ServerManager(517): Registering regionserver=75744186b12a,36797,1733939007319 2024-12-11T17:43:29,602 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:35486 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:40997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35486 dst: /127.0.0.1:40997
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
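The DFSStripedOutputStream warnings above ("Cannot allocate parity block ... policy=RS-3-2-1024k") come from writing under an erasure-coding policy that needs more datanodes (3 data + 2 parity = 5) than this three-datanode mini cluster has; the log itself points at 'hdfs ec -verifyClusterSetup' as the CLI check. Below is a minimal, read-only Java sketch of the equivalent inspection; it assumes the NameNode URI and directory seen in this log and leaves any policy change to the hdfs ec tool.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class ShowEcPolicy {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // NameNode and path taken from the log above; adjust for another cluster.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:45685"), conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path dir = new Path("/user/jenkins/test-data");
          // Null means plain replication; RS-3-2-1024k would explain the warnings,
          // since it needs at least five datanodes to place all data and parity blocks.
          System.out.println("effective EC policy: " + dfs.getErasureCodingPolicy(dir));
        }
      }
    }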
2024-12-11T17:43:29,609 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33505 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 75744186b12a,38729,1733939007130 2024-12-11T17:43:29,610 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33505 {}] master.ServerManager(517): Registering regionserver=75744186b12a,38729,1733939007130 2024-12-11T17:43:29,614 DEBUG [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962 2024-12-11T17:43:29,614 DEBUG [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45685 2024-12-11T17:43:29,614 DEBUG [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T17:43:29,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-11T17:43:29,640 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33505 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 75744186b12a,40257,1733939007263 2024-12-11T17:43:29,640 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33505 {}] master.ServerManager(517): Registering regionserver=75744186b12a,40257,1733939007263 2024-12-11T17:43:29,640 DEBUG [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962 2024-12-11T17:43:29,641 DEBUG [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45685 2024-12-11T17:43:29,641 DEBUG [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T17:43:29,649 DEBUG [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962 2024-12-11T17:43:29,650 DEBUG [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45685 2024-12-11T17:43:29,650 DEBUG [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T17:43:29,655 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T17:43:29,689 DEBUG [RS:2;75744186b12a:36797 {}] zookeeper.ZKUtil(111): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/75744186b12a,36797,1733939007319 2024-12-11T17:43:29,689 WARN [RS:2;75744186b12a:36797 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
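The FSTableDescriptors entry above shows the hbase:meta schema being written out, with column families such as 'info' (VERSIONS => '3', ROW_INDEX_V1 encoding, ROWCOL bloom filter, IN_MEMORY, 8 KB blocks). Below is a minimal sketch of declaring a family with the same attributes through the public descriptor builders; it builds a hypothetical user table rather than touching hbase:meta itself.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Mirrors the attributes logged for hbase:meta's 'info' family above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table")) // hypothetical table name
            .setColumnFamily(info)
            .build();
        System.out.println(table);
      }
    }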
2024-12-11T17:43:29,690 INFO [RS:2;75744186b12a:36797 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T17:43:29,690 DEBUG [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,36797,1733939007319 2024-12-11T17:43:29,692 DEBUG [RS:1;75744186b12a:40257 {}] zookeeper.ZKUtil(111): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/75744186b12a,40257,1733939007263 2024-12-11T17:43:29,692 WARN [RS:1;75744186b12a:40257 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T17:43:29,692 INFO [RS:1;75744186b12a:40257 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T17:43:29,692 DEBUG [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,40257,1733939007263 2024-12-11T17:43:29,693 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [75744186b12a,38729,1733939007130] 2024-12-11T17:43:29,693 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [75744186b12a,40257,1733939007263] 2024-12-11T17:43:29,693 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [75744186b12a,36797,1733939007319] 2024-12-11T17:43:29,693 DEBUG [RS:0;75744186b12a:38729 {}] zookeeper.ZKUtil(111): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/75744186b12a,38729,1733939007130 2024-12-11T17:43:29,694 WARN [RS:0;75744186b12a:38729 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
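The wal.WALFactory lines above show each region server instantiating AsyncFSWALProvider, which is selected through the hbase.wal.provider setting. Below is a minimal sketch of pinning the provider explicitly; the alternative value "filesystem" (the classic FSHLog-based provider) is shown as a commented option and should be checked against the target HBase release.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalProviderConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" matches the AsyncFSWALProvider seen in the log above.
        conf.set("hbase.wal.provider", "asyncfs");
        // conf.set("hbase.wal.provider", "filesystem"); // classic FSHLog-based WAL
        System.out.println("WAL provider: " + conf.get("hbase.wal.provider"));
      }
    }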
2024-12-11T17:43:29,694 INFO [RS:0;75744186b12a:38729 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T17:43:29,694 DEBUG [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,38729,1733939007130 2024-12-11T17:43:29,734 INFO [RS:2;75744186b12a:36797 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T17:43:29,734 INFO [RS:1;75744186b12a:40257 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T17:43:29,735 INFO [RS:0;75744186b12a:38729 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T17:43:29,757 INFO [RS:2;75744186b12a:36797 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T17:43:29,759 INFO [RS:0;75744186b12a:38729 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T17:43:29,763 INFO [RS:1;75744186b12a:40257 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T17:43:29,775 INFO [RS:1;75744186b12a:40257 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T17:43:29,776 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,776 INFO [RS:2;75744186b12a:36797 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T17:43:29,776 INFO [RS:0;75744186b12a:38729 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T17:43:29,776 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,776 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
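The MemStoreFlusher lines above report globalMemStoreLimit=880 M with a low-water mark of 836 M, which is consistent with the usual sizing rule: the global limit is a fraction of the region server heap (hbase.regionserver.global.memstore.size, commonly 0.4 by default) and the low mark is a further fraction of that limit (hbase.regionserver.global.memstore.size.lower.limit, commonly 0.95, and 0.95 x 880 M = 836 M). The key names and defaults are assumptions to verify against the running version; the arithmetic itself is sketched below.

    public class MemstoreSizing {
      public static void main(String[] args) {
        // Assumed sizing fractions (check hbase-default.xml of the running version):
        // 0.4 of the heap for the global limit, and 0.95 of that limit for the low mark.
        double globalFraction = 0.4;
        double lowerLimitFraction = 0.95;

        long heapBytes = 2200L * 1024 * 1024; // a heap size that reproduces the log's numbers
        long globalLimit = (long) (heapBytes * globalFraction);
        long lowWaterMark = (long) (globalLimit * lowerLimitFraction);

        // Prints 880 MB and 836 MB, matching the MemStoreFlusher line above.
        System.out.printf("globalMemStoreLimit=%d MB, lowMark=%d MB%n",
            globalLimit >> 20, lowWaterMark >> 20);
      }
    }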
2024-12-11T17:43:29,777 INFO [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T17:43:29,780 INFO [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T17:43:29,785 INFO [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T17:43:29,788 INFO [RS:2;75744186b12a:36797 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T17:43:29,788 INFO [RS:0;75744186b12a:38729 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T17:43:29,790 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,791 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,790 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,791 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,792 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,792 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,792 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,792 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,792 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/75744186b12a:0, corePoolSize=2, maxPoolSize=2 2024-12-11T17:43:29,793 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,792 INFO [RS:1;75744186b12a:40257 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T17:43:29,793 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,793 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,793 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
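The PressureAwareCompactionThroughputController lines above cap aggregate compaction I/O between 50 MB/second and 100 MB/second with a 60000 ms tuning period. Below is a minimal sketch of setting those bounds; the key names (hbase.hstore.compaction.throughput.higher.bound / lower.bound) are assumptions based on the documented throughput-controller settings, so confirm them for your release.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputBounds {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Values in bytes/second, mirroring the 100 MB/s and 50 MB/s bounds logged above.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        System.out.println(conf.get("hbase.hstore.compaction.throughput.higher.bound"));
      }
    }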
2024-12-11T17:43:29,793 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,793 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,793 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,793 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,793 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,793 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/75744186b12a:0, corePoolSize=2, maxPoolSize=2 2024-12-11T17:43:29,793 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:29,794 DEBUG [RS:0;75744186b12a:38729 {}] executor.ExecutorService(95): Starting executor service 
name=RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:29,794 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,794 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/75744186b12a:0, corePoolSize=2, maxPoolSize=2 2024-12-11T17:43:29,794 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:29,794 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,795 DEBUG [RS:2;75744186b12a:36797 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:29,795 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,795 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,795 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,795 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,795 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:29,795 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:29,796 DEBUG [RS:1;75744186b12a:40257 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:29,807 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,807 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,807 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
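The ChoreService lines throughout this startup show periodic tasks (CompactionChecker, MemstoreFlusherChore, ExecutorStatusChore, ...) being scheduled as ScheduledChore instances. Below is a minimal sketch of that pattern; it assumes the ScheduledChore(name, stopper, period) constructor and ChoreService.scheduleChore(...) signatures, which are internal APIs and should be checked against the HBase version in use.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("demo");
        // Runs chore() once per second until the stopper is flipped, much like
        // the CompactionChecker chore scheduled with period=1000 ms above.
        ScheduledChore chore = new ScheduledChore("demoChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("tick");
          }
        };
        service.scheduleChore(chore);
        Thread.sleep(3500);
        stopper.stop("demo done");
        service.shutdown();
      }
    }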
2024-12-11T17:43:29,807 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,807 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,807 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,807 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,807 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,807 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,807 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,807 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,807 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,808 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,808 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,38729,1733939007130-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T17:43:29,808 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,808 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,40257,1733939007263-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T17:43:29,808 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,808 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,36797,1733939007319-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T17:43:29,834 INFO [RS:1;75744186b12a:40257 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T17:43:29,834 INFO [RS:0;75744186b12a:38729 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T17:43:29,836 INFO [RS:2;75744186b12a:36797 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T17:43:29,837 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,36797,1733939007319-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,837 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,38729,1733939007130-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-12-11T17:43:29,837 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,837 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,838 INFO [RS:0;75744186b12a:38729 {}] regionserver.Replication(171): 75744186b12a,38729,1733939007130 started 2024-12-11T17:43:29,838 INFO [RS:2;75744186b12a:36797 {}] regionserver.Replication(171): 75744186b12a,36797,1733939007319 started 2024-12-11T17:43:29,837 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,40257,1733939007263-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,839 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,839 INFO [RS:1;75744186b12a:40257 {}] regionserver.Replication(171): 75744186b12a,40257,1733939007263 started 2024-12-11T17:43:29,862 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,862 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:29,862 INFO [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(1482): Serving as 75744186b12a,40257,1733939007263, RpcServer on 75744186b12a/172.17.0.2:40257, sessionid=0x100160747060002 2024-12-11T17:43:29,863 INFO [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer(1482): Serving as 75744186b12a,36797,1733939007319, RpcServer on 75744186b12a/172.17.0.2:36797, sessionid=0x100160747060003 2024-12-11T17:43:29,864 DEBUG [RS:2;75744186b12a:36797 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T17:43:29,864 DEBUG [RS:1;75744186b12a:40257 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T17:43:29,864 DEBUG [RS:2;75744186b12a:36797 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 75744186b12a,36797,1733939007319 2024-12-11T17:43:29,864 DEBUG [RS:1;75744186b12a:40257 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 75744186b12a,40257,1733939007263 2024-12-11T17:43:29,864 DEBUG [RS:2;75744186b12a:36797 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,36797,1733939007319' 2024-12-11T17:43:29,864 DEBUG [RS:1;75744186b12a:40257 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,40257,1733939007263' 2024-12-11T17:43:29,864 DEBUG [RS:1;75744186b12a:40257 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T17:43:29,864 DEBUG [RS:2;75744186b12a:36797 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T17:43:29,865 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T17:43:29,865 INFO [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer(1482): Serving as 75744186b12a,38729,1733939007130, RpcServer on 75744186b12a/172.17.0.2:38729, sessionid=0x100160747060001 2024-12-11T17:43:29,865 DEBUG [RS:1;75744186b12a:40257 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T17:43:29,865 DEBUG [RS:2;75744186b12a:36797 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T17:43:29,866 DEBUG [RS:0;75744186b12a:38729 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T17:43:29,866 DEBUG [RS:0;75744186b12a:38729 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 75744186b12a,38729,1733939007130 2024-12-11T17:43:29,866 DEBUG [RS:0;75744186b12a:38729 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,38729,1733939007130' 2024-12-11T17:43:29,866 DEBUG [RS:0;75744186b12a:38729 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T17:43:29,866 DEBUG [RS:1;75744186b12a:40257 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T17:43:29,866 DEBUG [RS:1;75744186b12a:40257 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T17:43:29,866 DEBUG [RS:2;75744186b12a:36797 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T17:43:29,866 DEBUG [RS:2;75744186b12a:36797 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T17:43:29,866 DEBUG [RS:1;75744186b12a:40257 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 75744186b12a,40257,1733939007263 2024-12-11T17:43:29,866 DEBUG [RS:2;75744186b12a:36797 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 75744186b12a,36797,1733939007319 2024-12-11T17:43:29,866 DEBUG [RS:2;75744186b12a:36797 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,36797,1733939007319' 2024-12-11T17:43:29,866 DEBUG [RS:1;75744186b12a:40257 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,40257,1733939007263' 2024-12-11T17:43:29,867 DEBUG [RS:0;75744186b12a:38729 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T17:43:29,867 DEBUG [RS:2;75744186b12a:36797 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T17:43:29,867 DEBUG [RS:1;75744186b12a:40257 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T17:43:29,868 DEBUG [RS:1;75744186b12a:40257 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T17:43:29,868 DEBUG [RS:0;75744186b12a:38729 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T17:43:29,868 DEBUG [RS:0;75744186b12a:38729 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T17:43:29,868 DEBUG [RS:2;75744186b12a:36797 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 
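The ZKProcedureMemberRpcs lines above show each region server subscribing to the /hbase/flush-table-proc and /hbase/online-snapshot znodes; these are the member ends of the distributed procedures behind table flushes and snapshots. Below is a minimal client-side sketch that would exercise them; whether a flush goes through this ZK-based path or a master procedure depends on the HBase version, so treat that routing note as an assumption, and the table name is hypothetical.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndSnapshot {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("demo_table"); // hypothetical table name
          admin.flush(table);                     // table-wide flush of all regions
          admin.snapshot("demo_snapshot", table); // online snapshot while the table stays enabled
        }
      }
    }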
2024-12-11T17:43:29,868 DEBUG [RS:0;75744186b12a:38729 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 75744186b12a,38729,1733939007130 2024-12-11T17:43:29,868 DEBUG [RS:0;75744186b12a:38729 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,38729,1733939007130' 2024-12-11T17:43:29,868 DEBUG [RS:0;75744186b12a:38729 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T17:43:29,868 DEBUG [RS:1;75744186b12a:40257 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T17:43:29,868 INFO [RS:1;75744186b12a:40257 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T17:43:29,868 INFO [RS:1;75744186b12a:40257 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T17:43:29,869 DEBUG [RS:0;75744186b12a:38729 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T17:43:29,869 DEBUG [RS:2;75744186b12a:36797 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T17:43:29,869 INFO [RS:2;75744186b12a:36797 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T17:43:29,869 INFO [RS:2;75744186b12a:36797 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T17:43:29,869 DEBUG [RS:0;75744186b12a:38729 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T17:43:29,869 INFO [RS:0;75744186b12a:38729 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T17:43:29,869 INFO [RS:0;75744186b12a:38729 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
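The RegionServerRpcQuotaManager and RegionServerSpaceQuotaManager lines above report quota support disabled, which is the default. Below is a minimal sketch of turning it on and applying a simple request throttle; it assumes the hbase.quota.enabled switch and the QuotaSettingsFactory throttle helper, both of which should be confirmed against the target release, and the table name is hypothetical.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
    import org.apache.hadoop.hbase.quotas.ThrottleType;

    public class EnableQuotas {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // must also be set on the servers
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Cap a (hypothetical) table at 1000 requests/second once quotas are on.
          admin.setQuota(QuotaSettingsFactory.throttleTable(
              TableName.valueOf("demo_table"), ThrottleType.REQUEST_NUMBER, 1000, TimeUnit.SECONDS));
        }
      }
    }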
2024-12-11T17:43:29,973 INFO [RS:2;75744186b12a:36797 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T17:43:29,973 INFO [RS:0;75744186b12a:38729 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T17:43:29,973 INFO [RS:1;75744186b12a:40257 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-11T17:43:29,976 INFO [RS:1;75744186b12a:40257 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=75744186b12a%2C40257%2C1733939007263, suffix=, logDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,40257,1733939007263, archiveDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/oldWALs, maxLogs=32 2024-12-11T17:43:29,976 INFO [RS:2;75744186b12a:36797 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=75744186b12a%2C36797%2C1733939007319, suffix=, logDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,36797,1733939007319, archiveDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/oldWALs, maxLogs=32 2024-12-11T17:43:29,976 INFO [RS:0;75744186b12a:38729 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=75744186b12a%2C38729%2C1733939007130, suffix=, logDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,38729,1733939007130, archiveDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/oldWALs, maxLogs=32 2024-12-11T17:43:29,993 DEBUG [RS:0;75744186b12a:38729 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,38729,1733939007130/75744186b12a%2C38729%2C1733939007130.1733939009980, exclude list is [], retry=0 2024-12-11T17:43:29,994 DEBUG [RS:1;75744186b12a:40257 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,40257,1733939007263/75744186b12a%2C40257%2C1733939007263.1733939009980, exclude list is [], retry=0 2024-12-11T17:43:29,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34533,DS-6c88c14c-8541-496c-ac56-a988dea8456d,DISK] 2024-12-11T17:43:29,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40997,DS-581b03ce-375e-4b02-b0ce-172ebf015187,DISK] 2024-12-11T17:43:29,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34533,DS-6c88c14c-8541-496c-ac56-a988dea8456d,DISK] 2024-12-11T17:43:29,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:40997,DS-581b03ce-375e-4b02-b0ce-172ebf015187,DISK] 2024-12-11T17:43:29,999 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34485,DS-867e9432-bbc8-479e-971b-c9cb96ab3687,DISK] 2024-12-11T17:43:30,000 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34485,DS-867e9432-bbc8-479e-971b-c9cb96ab3687,DISK] 2024-12-11T17:43:30,028 DEBUG [RS:2;75744186b12a:36797 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,36797,1733939007319/75744186b12a%2C36797%2C1733939007319.1733939009980, exclude list is [], retry=0 2024-12-11T17:43:30,030 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:30,031 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T17:43:30,032 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962 2024-12-11T17:43:30,034 INFO [RS:1;75744186b12a:40257 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,40257,1733939007263/75744186b12a%2C40257%2C1733939007263.1733939009980 2024-12-11T17:43:30,035 INFO [RS:0;75744186b12a:38729 {}] 
wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,38729,1733939007130/75744186b12a%2C38729%2C1733939007130.1733939009980 2024-12-11T17:43:30,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40997,DS-581b03ce-375e-4b02-b0ce-172ebf015187,DISK] 2024-12-11T17:43:30,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34485,DS-867e9432-bbc8-479e-971b-c9cb96ab3687,DISK] 2024-12-11T17:43:30,035 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34533,DS-6c88c14c-8541-496c-ac56-a988dea8456d,DISK] 2024-12-11T17:43:30,036 DEBUG [RS:1;75744186b12a:40257 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45825:45825),(127.0.0.1/127.0.0.1:38503:38503),(127.0.0.1/127.0.0.1:37283:37283)] 2024-12-11T17:43:30,037 DEBUG [RS:0;75744186b12a:38729 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38503:38503),(127.0.0.1/127.0.0.1:45825:45825),(127.0.0.1/127.0.0.1:37283:37283)] 2024-12-11T17:43:30,042 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:30,043 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:30,044 INFO [RS:2;75744186b12a:36797 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,36797,1733939007319/75744186b12a%2C36797%2C1733939007319.1733939009980 2024-12-11T17:43:30,048 DEBUG [RS:2;75744186b12a:36797 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37283:37283),(127.0.0.1/127.0.0.1:45825:45825),(127.0.0.1/127.0.0.1:38503:38503)] 2024-12-11T17:43:30,056 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:33460 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775696_1017] {}] datanode.DataXceiver(331): 127.0.0.1:34485:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33460 dst: /127.0.0.1:34485
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T17:43:30,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_-9223372036854775696_1018 (size=32) 2024-12-11T17:43:30,065 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:30,066 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:30,069 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T17:43:30,071 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T17:43:30,072 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:30,072 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:30,073 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T17:43:30,075 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3,
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T17:43:30,075 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:30,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:30,076 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T17:43:30,079 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T17:43:30,079 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:30,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:30,080 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T17:43:30,083 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T17:43:30,083 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:30,085 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:30,085 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T17:43:30,087 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740 2024-12-11T17:43:30,088 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740 2024-12-11T17:43:30,100 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T17:43:30,100 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T17:43:30,111 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T17:43:30,115 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T17:43:30,144 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T17:43:30,145 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69858668, jitterRate=0.04097527265548706}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T17:43:30,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733939010066Initializing all the Stores at 1733939010068 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939010068Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939010069 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 
'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939010069Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939010069Cleaning up temporary data from old regions at 1733939010101 (+32 ms)Region opened successfully at 1733939010147 (+46 ms) 2024-12-11T17:43:30,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T17:43:30,147 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T17:43:30,147 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T17:43:30,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T17:43:30,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T17:43:30,149 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T17:43:30,150 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733939010147Disabling compacts and flushes for region at 1733939010147Disabling writes for close at 1733939010148 (+1 ms)Writing region close event to WAL at 1733939010149 (+1 ms)Closed at 1733939010149 2024-12-11T17:43:30,154 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T17:43:30,154 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T17:43:30,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T17:43:30,178 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T17:43:30,186 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T17:43:30,339 DEBUG [75744186b12a:33505 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-11T17:43:30,346 DEBUG [75744186b12a:33505 {}] balancer.BalancerClusterState(204): Hosts are {75744186b12a=0} racks are {/default-rack=0} 2024-12-11T17:43:30,352 DEBUG [75744186b12a:33505 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T17:43:30,352 DEBUG [75744186b12a:33505 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T17:43:30,352 DEBUG [75744186b12a:33505 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T17:43:30,352 DEBUG [75744186b12a:33505 {}] 
balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T17:43:30,353 DEBUG [75744186b12a:33505 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T17:43:30,353 DEBUG [75744186b12a:33505 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T17:43:30,353 INFO [75744186b12a:33505 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T17:43:30,353 INFO [75744186b12a:33505 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T17:43:30,353 INFO [75744186b12a:33505 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T17:43:30,353 DEBUG [75744186b12a:33505 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T17:43:30,359 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=75744186b12a,40257,1733939007263 2024-12-11T17:43:30,365 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 75744186b12a,40257,1733939007263, state=OPENING 2024-12-11T17:43:30,433 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T17:43:30,450 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:30,450 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:30,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:30,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:30,451 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:30,452 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:30,452 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:30,452 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:30,454 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T17:43:30,457 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=75744186b12a,40257,1733939007263}] 2024-12-11T17:43:30,651 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using 
SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T17:43:30,654 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54029, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T17:43:30,667 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T17:43:30,668 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-11T17:43:30,669 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-11T17:43:30,673 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=75744186b12a%2C40257%2C1733939007263.meta, suffix=.meta, logDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,40257,1733939007263, archiveDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/oldWALs, maxLogs=32 2024-12-11T17:43:30,697 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,40257,1733939007263/75744186b12a%2C40257%2C1733939007263.meta.1733939010675.meta, exclude list is [], retry=0 2024-12-11T17:43:30,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40997,DS-581b03ce-375e-4b02-b0ce-172ebf015187,DISK] 2024-12-11T17:43:30,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34485,DS-867e9432-bbc8-479e-971b-c9cb96ab3687,DISK] 2024-12-11T17:43:30,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:34533,DS-6c88c14c-8541-496c-ac56-a988dea8456d,DISK] 2024-12-11T17:43:30,706 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,40257,1733939007263/75744186b12a%2C40257%2C1733939007263.meta.1733939010675.meta 2024-12-11T17:43:30,706 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45825:45825),(127.0.0.1/127.0.0.1:37283:37283),(127.0.0.1/127.0.0.1:38503:38503)] 2024-12-11T17:43:30,706 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T17:43:30,708 DEBUG 
[RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T17:43:30,710 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T17:43:30,714 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-12-11T17:43:30,718 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T17:43:30,718 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:30,718 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T17:43:30,718 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T17:43:30,721 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T17:43:30,722 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T17:43:30,723 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:30,723 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:30,724 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T17:43:30,725 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T17:43:30,725 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:30,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:30,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T17:43:30,727 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T17:43:30,727 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:30,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:30,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T17:43:30,730 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T17:43:30,730 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:30,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:30,731 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T17:43:30,732 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740 2024-12-11T17:43:30,734 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740 2024-12-11T17:43:30,736 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T17:43:30,736 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T17:43:30,736 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-11T17:43:30,738 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T17:43:30,740 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62083607, jitterRate=-0.07488216459751129}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T17:43:30,740 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T17:43:30,741 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733939010718Writing region info on filesystem at 1733939010719 (+1 ms)Initializing all the Stores at 1733939010720 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939010721 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939010721Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939010721Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939010721Cleaning up temporary data from old regions at 1733939010736 (+15 ms)Running coprocessor post-open hooks at 1733939010740 (+4 ms)Region opened successfully at 1733939010740 2024-12-11T17:43:30,746 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733939010626 2024-12-11T17:43:30,756 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T17:43:30,757 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T17:43:30,758 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=75744186b12a,40257,1733939007263 2024-12-11T17:43:30,760 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 75744186b12a,40257,1733939007263, state=OPEN 2024-12-11T17:43:30,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T17:43:30,791 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T17:43:30,791 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T17:43:30,791 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T17:43:30,791 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:30,791 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:30,791 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:30,791 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:30,792 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=75744186b12a,40257,1733939007263 2024-12-11T17:43:30,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T17:43:30,799 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=75744186b12a,40257,1733939007263 in 336 msec 2024-12-11T17:43:30,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T17:43:30,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 635 msec 2024-12-11T17:43:30,808 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T17:43:30,808 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T17:43:30,826 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T17:43:30,827 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=75744186b12a,40257,1733939007263, seqNum=-1] 2024-12-11T17:43:30,845 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T17:43:30,847 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58337, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T17:43:30,869 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.6270 sec 2024-12-11T17:43:30,869 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733939010869, completionTime=-1 2024-12-11T17:43:30,873 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T17:43:30,873 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-12-11T17:43:30,902 INFO [master/75744186b12a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T17:43:30,902 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733939070902 2024-12-11T17:43:30,902 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733939130902 2024-12-11T17:43:30,902 INFO [master/75744186b12a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 28 msec 2024-12-11T17:43:30,903 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-11T17:43:30,911 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,33505,1733939006308-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:30,911 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,33505,1733939006308-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:30,911 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,33505,1733939006308-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:30,912 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-75744186b12a:33505, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:30,913 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:30,913 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-12-11T17:43:30,919 DEBUG [master/75744186b12a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T17:43:30,939 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.512sec 2024-12-11T17:43:30,940 INFO [master/75744186b12a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T17:43:30,941 INFO [master/75744186b12a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T17:43:30,942 INFO [master/75744186b12a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T17:43:30,943 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-11T17:43:30,943 INFO [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T17:43:30,943 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,33505,1733939006308-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T17:43:30,944 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,33505,1733939006308-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T17:43:30,948 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T17:43:30,948 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T17:43:30,949 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,33505,1733939006308-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
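
At this point the master has logged "Master has completed initialization" and its chores are scheduled. A minimal, hypothetical client-side check of the same state through the public HBase Admin API might look like the sketch below; it assumes an hbase-site.xml for this cluster is on the classpath, and the class name is illustrative rather than taken from the test.

// Hypothetical status check, not part of the captured log: confirm the active master
// and the number of live region servers once initialization has completed.
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusCheck {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("Active master: " + metrics.getMasterName());
      System.out.println("Live region servers: " + metrics.getLiveServerMetrics().size());
    }
  }
}
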
2024-12-11T17:43:30,968 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37a71110, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T17:43:30,972 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-11T17:43:30,972 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-11T17:43:30,976 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 75744186b12a,33505,-1 for getting cluster id 2024-12-11T17:43:30,978 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T17:43:30,986 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'beea206b-0fd5-4cc0-a32a-451428069263' 2024-12-11T17:43:30,988 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T17:43:30,988 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "beea206b-0fd5-4cc0-a32a-451428069263" 2024-12-11T17:43:30,988 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15a4defb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T17:43:30,988 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [75744186b12a,33505,-1] 2024-12-11T17:43:30,991 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T17:43:30,993 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:30,994 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T17:43:30,997 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f2e356c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T17:43:30,998 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T17:43:31,006 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=75744186b12a,40257,1733939007263, seqNum=-1] 2024-12-11T17:43:31,007 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T17:43:31,009 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48758, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T17:43:31,028 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=75744186b12a,33505,1733939006308 2024-12-11T17:43:31,032 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T17:43:31,037 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 75744186b12a,33505,1733939006308 2024-12-11T17:43:31,040 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@76fac03 2024-12-11T17:43:31,041 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T17:43:31,044 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54734, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T17:43:31,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T17:43:31,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-11T17:43:31,060 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T17:43:31,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-11T17:43:31,064 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:31,067 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T17:43:31,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T17:43:31,078 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:31,078 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
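
The master log above shows pid=4, a CreateTableProcedure for 'TestHBaseWalOnEC' with a single column family 'cf'. A minimal client-side sketch of issuing an equivalent request through the HBase Admin API follows; the table and family names are taken from the log, everything else is left at defaults, and the class name is illustrative, not from the test.

// Hypothetical sketch, not part of the captured log: create the same table the
// master records as pid=4. The master then runs CreateTableProcedure as logged above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestHBaseWalOnEC {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf")) // single 'cf' family, defaults
          .build();
      admin.createTable(desc); // synchronous: returns once the table is created and enabled
    }
  }
}
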
2024-12-11T17:43:31,081 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:59266 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:34485:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59266 dst: /127.0.0.1:34485 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:31,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-11T17:43:31,086 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:31,089 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 663db9d0ffa2d11f79aabc06e052d964, NAME => 'TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962 2024-12-11T17:43:31,099 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:31,099 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
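
The repeated "Cannot allocate parity block" warnings come from writing under the RS-3-2-1024k erasure coding policy, which places 3 data plus 2 parity blocks on distinct datanodes, while this minicluster runs only 3 datanodes; the 2 parity blocks (indexes 3 and 4) therefore cannot be placed, which is also what the follow-up "failed to write 2 blocks" messages report. Besides the 'hdfs ec -verifyClusterSetup' command the warning itself suggests, the effective policy on a path can be inspected programmatically. Below is a minimal sketch, assuming fs.defaultFS points at the test NameNode and using an illustrative path; the class name is not from the test.

// Hypothetical sketch, not part of the captured log: report the erasure coding policy
// in effect on a directory (null means plain replication).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class CheckWalEcPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // assumes fs.defaultFS points at the test NameNode
    Path dir = new Path(args.length > 0 ? args[0] : "/user/jenkins/test-data"); // illustrative path
    try (DistributedFileSystem dfs = (DistributedFileSystem) dir.getFileSystem(conf)) {
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir); // inherited policy, or null
      if (policy == null) {
        System.out.println(dir + " uses plain replication");
      } else {
        System.out.printf("%s uses %s (%d data + %d parity units)%n",
            dir, policy.getName(), policy.getNumDataUnits(), policy.getNumParityUnits());
      }
    }
  }
}
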
2024-12-11T17:43:31,141 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:49814 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:40997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49814 dst: /127.0.0.1:40997 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:31,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-11T17:43:31,157 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:31,158 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:31,158 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 663db9d0ffa2d11f79aabc06e052d964, disabling compactions & flushes 2024-12-11T17:43:31,158 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 2024-12-11T17:43:31,158 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 2024-12-11T17:43:31,158 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. after waiting 0 ms 2024-12-11T17:43:31,158 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 2024-12-11T17:43:31,159 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 
2024-12-11T17:43:31,159 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 663db9d0ffa2d11f79aabc06e052d964: Waiting for close lock at 1733939011158Disabling compacts and flushes for region at 1733939011158Disabling writes for close at 1733939011158Writing region close event to WAL at 1733939011158Closed at 1733939011158 2024-12-11T17:43:31,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_-9223372036854775740_1008 (size=1189) 2024-12-11T17:43:31,169 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T17:43:31,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_-9223372036854775757_1006 (size=196) 2024-12-11T17:43:31,175 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733939011169"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733939011169"}]},"ts":"1733939011169"} 2024-12-11T17:43:31,181 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-11T17:43:31,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_-9223372036854775741_1008 (size=1189) 2024-12-11T17:43:31,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T17:43:31,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_-9223372036854775756_1006 (size=196) 2024-12-11T17:43:31,184 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T17:43:31,187 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733939011184"}]},"ts":"1733939011184"} 2024-12-11T17:43:31,194 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-11T17:43:31,195 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {75744186b12a=0} racks are {/default-rack=0} 2024-12-11T17:43:31,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T17:43:31,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T17:43:31,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T17:43:31,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T17:43:31,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T17:43:31,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T17:43:31,197 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T17:43:31,197 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 
2024-12-11T17:43:31,197 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T17:43:31,197 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T17:43:31,200 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=663db9d0ffa2d11f79aabc06e052d964, ASSIGN}] 2024-12-11T17:43:31,209 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=663db9d0ffa2d11f79aabc06e052d964, ASSIGN 2024-12-11T17:43:31,216 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=663db9d0ffa2d11f79aabc06e052d964, ASSIGN; state=OFFLINE, location=75744186b12a,40257,1733939007263; forceNewPlan=false, retain=false 2024-12-11T17:43:31,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_-9223372036854775724_1010 (size=34) 2024-12-11T17:43:31,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_-9223372036854775725_1010 (size=34) 2024-12-11T17:43:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_-9223372036854775772_1004 (size=42) 2024-12-11T17:43:31,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_-9223372036854775773_1004 (size=42) 2024-12-11T17:43:31,368 INFO [75744186b12a:33505 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T17:43:31,369 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=663db9d0ffa2d11f79aabc06e052d964, regionState=OPENING, regionLocation=75744186b12a,40257,1733939007263 2024-12-11T17:43:31,374 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=663db9d0ffa2d11f79aabc06e052d964, ASSIGN because future has completed 2024-12-11T17:43:31,375 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 663db9d0ffa2d11f79aabc06e052d964, server=75744186b12a,40257,1733939007263}] 2024-12-11T17:43:31,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T17:43:31,537 INFO [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 
2024-12-11T17:43:31,537 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 663db9d0ffa2d11f79aabc06e052d964, NAME => 'TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964.', STARTKEY => '', ENDKEY => ''} 2024-12-11T17:43:31,538 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:31,538 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:31,538 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:31,538 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:31,541 INFO [StoreOpener-663db9d0ffa2d11f79aabc06e052d964-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:31,543 INFO [StoreOpener-663db9d0ffa2d11f79aabc06e052d964-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 663db9d0ffa2d11f79aabc06e052d964 columnFamilyName cf 2024-12-11T17:43:31,543 DEBUG [StoreOpener-663db9d0ffa2d11f79aabc06e052d964-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:31,545 INFO [StoreOpener-663db9d0ffa2d11f79aabc06e052d964-1 {}] regionserver.HStore(327): Store=663db9d0ffa2d11f79aabc06e052d964/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T17:43:31,545 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:31,546 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/default/TestHBaseWalOnEC/663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:31,547 DEBUG 
[RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/default/TestHBaseWalOnEC/663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:31,548 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:31,548 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:31,551 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:31,568 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/default/TestHBaseWalOnEC/663db9d0ffa2d11f79aabc06e052d964/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T17:43:31,570 INFO [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 663db9d0ffa2d11f79aabc06e052d964; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68488152, jitterRate=0.020552992820739746}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T17:43:31,570 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:31,571 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 663db9d0ffa2d11f79aabc06e052d964: Running coprocessor pre-open hook at 1733939011538Writing region info on filesystem at 1733939011538Initializing all the Stores at 1733939011540 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939011540Cleaning up temporary data from old regions at 1733939011548 (+8 ms)Running coprocessor post-open hooks at 1733939011570 (+22 ms)Region opened successfully at 1733939011571 (+1 ms) 2024-12-11T17:43:31,574 INFO [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964., pid=6, masterSystemTime=1733939011530 2024-12-11T17:43:31,580 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 2024-12-11T17:43:31,580 INFO [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 
2024-12-11T17:43:31,583 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=663db9d0ffa2d11f79aabc06e052d964, regionState=OPEN, openSeqNum=2, regionLocation=75744186b12a,40257,1733939007263 2024-12-11T17:43:31,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 663db9d0ffa2d11f79aabc06e052d964, server=75744186b12a,40257,1733939007263 because future has completed 2024-12-11T17:43:31,599 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T17:43:31,602 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 663db9d0ffa2d11f79aabc06e052d964, server=75744186b12a,40257,1733939007263 in 218 msec 2024-12-11T17:43:31,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T17:43:31,607 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=663db9d0ffa2d11f79aabc06e052d964, ASSIGN in 400 msec 2024-12-11T17:43:31,609 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T17:43:31,609 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733939011609"}]},"ts":"1733939011609"} 2024-12-11T17:43:31,613 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-11T17:43:31,615 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T17:43:31,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 563 msec 2024-12-11T17:43:31,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T17:43:31,703 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-11T17:43:31,703 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T17:43:31,705 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T17:43:31,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-11T17:43:31,713 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T17:43:31,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
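At this point pid=4 (CreateTableProcedure) has finished and HBaseTestingUtil has confirmed that the single region is assigned. The client-side flow behind these entries is roughly the sketch below; this is not the actual TestHBaseWalOnEC source, and the util handling is assumed, but createTable and waitUntilAllRegionsAssigned correspond to the procedure and the wait logged above.

// Rough client-side equivalent of the create-and-wait sequence logged above (assumed, not the test source).
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

class CreateTableSketch {
  // util is the test's HBaseTestingUtil instance (assumed; see the teardown stack traces later in this log).
  static void createAndWait(HBaseTestingUtil util) throws Exception {
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    Admin admin = util.getAdmin();
    // Drives the CreateTableProcedure seen above as pid=4, with the single 'cf' family.
    admin.createTable(TableDescriptorBuilder.newBuilder(tn)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build());
    // Matches the "Waiting until all regions of table TestHBaseWalOnEC get assigned" entries above.
    util.waitUntilAllRegionsAssigned(tn);
  }
}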
2024-12-11T17:43:31,729 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964., hostname=75744186b12a,40257,1733939007263, seqNum=2] 2024-12-11T17:43:31,744 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-11T17:43:31,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-11T17:43:31,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T17:43:31,762 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-11T17:43:31,765 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T17:43:31,766 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T17:43:31,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T17:43:31,935 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=40257 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-11T17:43:31,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 2024-12-11T17:43:31,940 INFO [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 663db9d0ffa2d11f79aabc06e052d964 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-11T17:43:32,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/default/TestHBaseWalOnEC/663db9d0ffa2d11f79aabc06e052d964/.tmp/cf/e181a9c376a9411399215196de9286ad is 36, key is row/cf:cq/1733939011732/Put/seqid=0 2024-12-11T17:43:32,014 WARN [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-11T17:43:32,014 WARN [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-11T17:43:32,024 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1893504459_22 at /127.0.0.1:49870 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:40997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49870 dst: /127.0.0.1:40997
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-11T17:43:32,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_-9223372036854775648_1025 (size=4787)
2024-12-11T17:43:32,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-11T17:43:32,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-12-11T17:43:32,435 WARN [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-11T17:43:32,435 INFO [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/default/TestHBaseWalOnEC/663db9d0ffa2d11f79aabc06e052d964/.tmp/cf/e181a9c376a9411399215196de9286ad 2024-12-11T17:43:32,480 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/default/TestHBaseWalOnEC/663db9d0ffa2d11f79aabc06e052d964/.tmp/cf/e181a9c376a9411399215196de9286ad as hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/default/TestHBaseWalOnEC/663db9d0ffa2d11f79aabc06e052d964/cf/e181a9c376a9411399215196de9286ad 2024-12-11T17:43:32,490 INFO [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/default/TestHBaseWalOnEC/663db9d0ffa2d11f79aabc06e052d964/cf/e181a9c376a9411399215196de9286ad, entries=1, sequenceid=5, filesize=4.7 K 2024-12-11T17:43:32,497 INFO [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 663db9d0ffa2d11f79aabc06e052d964 in 556ms, sequenceid=5, compaction requested=false 2024-12-11T17:43:32,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-11T17:43:32,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 663db9d0ffa2d11f79aabc06e052d964: 2024-12-11T17:43:32,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 
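The flush that just completed (pid=7/pid=8) was requested over RPC as "Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC" and wrote a single 32-byte cell whose key is row/cf:cq. A minimal client-side sketch that would produce this put-then-flush sequence follows; it is not the test's actual code, and the cell value literal is an assumption.

// Minimal sketch of the put-then-flush seen above (assumed, not the test source).
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

class PutAndFlushSketch {
  static void putAndFlush(Connection conn) throws Exception {
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(tn); Admin admin = conn.getAdmin()) {
      // The flushed cell's key above is row/cf:cq; the value here is assumed.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      // Triggers the FlushTableProcedure (pid=7) and FlushRegionProcedure (pid=8) above.
      admin.flush(tn);
    }
  }
}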
2024-12-11T17:43:32,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-11T17:43:32,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-11T17:43:32,507 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-11T17:43:32,507 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 738 msec 2024-12-11T17:43:32,511 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 762 msec 2024-12-11T17:43:32,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33505 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T17:43:32,892 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T17:43:32,905 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T17:43:32,905 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T17:43:32,906 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T17:43:32,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:32,910 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:32,910 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-11T17:43:32,910 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T17:43:32,910 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1934782802, stopped=false 2024-12-11T17:43:32,911 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=75744186b12a,33505,1733939006308 2024-12-11T17:43:32,929 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:32,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:32,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:32,929 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:32,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:32,929 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, 
quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:32,929 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:32,929 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:32,930 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T17:43:32,930 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:32,930 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T17:43:32,930 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:32,930 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:32,930 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:32,930 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T17:43:32,931 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:32,931 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '75744186b12a,38729,1733939007130' ***** 2024-12-11T17:43:32,931 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T17:43:32,931 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '75744186b12a,40257,1733939007263' ***** 2024-12-11T17:43:32,931 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T17:43:32,932 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '75744186b12a,36797,1733939007319' ***** 2024-12-11T17:43:32,932 INFO [RS:0;75744186b12a:38729 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T17:43:32,932 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T17:43:32,932 INFO [RS:2;75744186b12a:36797 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T17:43:32,932 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T17:43:32,932 INFO [RS:0;75744186b12a:38729 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
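The STOPPING messages above are the fallout of HBaseTestingUtil.shutdownMiniCluster(), which the earlier call stacks show being invoked from TestHBaseWalOnEC.tearDown (TestHBaseWalOnEC.java:101). A minimal sketch of such a teardown hook follows, assuming a static HBaseTestingUtil field named UTIL; only the shutdownMiniCluster() call itself is confirmed by the log.

// Minimal teardown sketch matching the call stacks above (field name and annotation assumed).
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

class TearDownSketch {
  // Assumed field; the log only confirms that tearDown() reaches HBaseTestingUtil.shutdownMiniCluster().
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @After
  public void tearDown() throws Exception {
    UTIL.shutdownMiniCluster(); // emits the "Shutting down minicluster" / STOPPING lines seen above
  }
}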
2024-12-11T17:43:32,932 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T17:43:32,932 INFO [RS:2;75744186b12a:36797 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T17:43:32,932 INFO [RS:1;75744186b12a:40257 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T17:43:32,932 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-11T17:43:32,932 INFO [RS:0;75744186b12a:38729 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T17:43:32,932 INFO [RS:2;75744186b12a:36797 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T17:43:32,932 INFO [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer(959): stopping server 75744186b12a,38729,1733939007130 2024-12-11T17:43:32,932 INFO [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer(959): stopping server 75744186b12a,36797,1733939007319 2024-12-11T17:43:32,933 INFO [RS:0;75744186b12a:38729 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T17:43:32,933 INFO [RS:2;75744186b12a:36797 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T17:43:32,933 INFO [RS:1;75744186b12a:40257 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T17:43:32,933 INFO [RS:0;75744186b12a:38729 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;75744186b12a:38729. 2024-12-11T17:43:32,933 INFO [RS:1;75744186b12a:40257 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T17:43:32,933 INFO [RS:2;75744186b12a:36797 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;75744186b12a:36797. 
2024-12-11T17:43:32,933 DEBUG [RS:0;75744186b12a:38729 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T17:43:32,933 DEBUG [RS:2;75744186b12a:36797 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T17:43:32,933 DEBUG [RS:0;75744186b12a:38729 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:32,933 DEBUG [RS:2;75744186b12a:36797 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:32,933 INFO [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(3091): Received CLOSE for 663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:32,933 INFO [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer(976): stopping server 75744186b12a,38729,1733939007130; all regions closed. 2024-12-11T17:43:32,933 INFO [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer(976): stopping server 75744186b12a,36797,1733939007319; all regions closed. 
2024-12-11T17:43:32,933 INFO [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(959): stopping server 75744186b12a,40257,1733939007263 2024-12-11T17:43:32,934 INFO [RS:1;75744186b12a:40257 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T17:43:32,934 INFO [RS:1;75744186b12a:40257 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;75744186b12a:40257. 2024-12-11T17:43:32,934 DEBUG [RS:1;75744186b12a:40257 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T17:43:32,934 DEBUG [RS:1;75744186b12a:40257 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:32,934 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 663db9d0ffa2d11f79aabc06e052d964, disabling compactions & flushes 2024-12-11T17:43:32,934 INFO [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 2024-12-11T17:43:32,934 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 2024-12-11T17:43:32,934 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. after waiting 0 ms 2024-12-11T17:43:32,934 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 2024-12-11T17:43:32,934 INFO [RS:1;75744186b12a:40257 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T17:43:32,934 INFO [RS:1;75744186b12a:40257 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T17:43:32,935 INFO [RS:1;75744186b12a:40257 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-12-11T17:43:32,935 INFO [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-11T17:43:32,939 INFO [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-12-11T17:43:32,939 DEBUG [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(1325): Online Regions={663db9d0ffa2d11f79aabc06e052d964=TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964., 1588230740=hbase:meta,,1.1588230740} 2024-12-11T17:43:32,940 DEBUG [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 663db9d0ffa2d11f79aabc06e052d964 2024-12-11T17:43:32,939 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T17:43:32,940 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T17:43:32,940 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T17:43:32,941 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T17:43:32,941 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T17:43:32,942 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-11T17:43:32,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741828_1016 (size=93) 2024-12-11T17:43:32,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_1073741828_1016 (size=93) 2024-12-11T17:43:32,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_1073741828_1016 (size=93) 2024-12-11T17:43:32,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_1073741826_1014 (size=93) 2024-12-11T17:43:32,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741826_1014 (size=93) 2024-12-11T17:43:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_1073741826_1014 (size=93) 2024-12-11T17:43:32,955 DEBUG [RS:0;75744186b12a:38729 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/oldWALs 2024-12-11T17:43:32,955 DEBUG [RS:2;75744186b12a:36797 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/oldWALs 2024-12-11T17:43:32,955 INFO [RS:0;75744186b12a:38729 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 75744186b12a%2C38729%2C1733939007130:(num 1733939009980) 2024-12-11T17:43:32,955 INFO [RS:2;75744186b12a:36797 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 75744186b12a%2C36797%2C1733939007319:(num 1733939009980) 2024-12-11T17:43:32,955 DEBUG 
[RS:0;75744186b12a:38729 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:32,955 DEBUG [RS:2;75744186b12a:36797 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:32,955 INFO [RS:2;75744186b12a:36797 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T17:43:32,955 INFO [RS:0;75744186b12a:38729 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T17:43:32,955 INFO [RS:0;75744186b12a:38729 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T17:43:32,955 INFO [RS:2;75744186b12a:36797 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T17:43:32,956 INFO [RS:2;75744186b12a:36797 {}] hbase.ChoreService(370): Chore service for: regionserver/75744186b12a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-11T17:43:32,956 INFO [RS:0;75744186b12a:38729 {}] hbase.ChoreService(370): Chore service for: regionserver/75744186b12a:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T17:43:32,956 INFO [RS:2;75744186b12a:36797 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T17:43:32,956 INFO [regionserver/75744186b12a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T17:43:32,956 INFO [RS:2;75744186b12a:36797 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T17:43:32,956 INFO [RS:2;75744186b12a:36797 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T17:43:32,956 INFO [RS:0;75744186b12a:38729 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-11T17:43:32,956 INFO [RS:2;75744186b12a:36797 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T17:43:32,956 INFO [RS:0;75744186b12a:38729 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-11T17:43:32,956 INFO [RS:0;75744186b12a:38729 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-11T17:43:32,956 INFO [RS:0;75744186b12a:38729 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T17:43:32,956 INFO [regionserver/75744186b12a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T17:43:32,956 INFO [RS:0;75744186b12a:38729 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38729 2024-12-11T17:43:32,956 INFO [RS:2;75744186b12a:36797 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36797 2024-12-11T17:43:32,966 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/default/TestHBaseWalOnEC/663db9d0ffa2d11f79aabc06e052d964/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-11T17:43:32,968 INFO [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 
2024-12-11T17:43:32,968 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 663db9d0ffa2d11f79aabc06e052d964: Waiting for close lock at 1733939012934Running coprocessor pre-close hooks at 1733939012934Disabling compacts and flushes for region at 1733939012934Disabling writes for close at 1733939012934Writing region close event to WAL at 1733939012940 (+6 ms)Running coprocessor post-close hooks at 1733939012967 (+27 ms)Closed at 1733939012968 (+1 ms) 2024-12-11T17:43:32,969 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964. 2024-12-11T17:43:32,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/75744186b12a,36797,1733939007319 2024-12-11T17:43:32,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T17:43:32,974 INFO [RS:2;75744186b12a:36797 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T17:43:32,985 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/.tmp/info/901c6a986d114683b331a4f06741d958 is 153, key is TestHBaseWalOnEC,,1733939011045.663db9d0ffa2d11f79aabc06e052d964./info:regioninfo/1733939011582/Put/seqid=0 2024-12-11T17:43:32,989 WARN [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:32,989 WARN [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:32,995 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1893504459_22 at /127.0.0.1:49884 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:40997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49884 dst: /127.0.0.1:40997 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:33,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-11T17:43:33,005 WARN [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:33,006 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/.tmp/info/901c6a986d114683b331a4f06741d958 2024-12-11T17:43:33,013 INFO [regionserver/75744186b12a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T17:43:33,016 INFO [RS:0;75744186b12a:38729 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T17:43:33,016 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/75744186b12a,38729,1733939007130 2024-12-11T17:43:33,017 INFO [regionserver/75744186b12a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T17:43:33,019 INFO [regionserver/75744186b12a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T17:43:33,038 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [75744186b12a,38729,1733939007130] 2024-12-11T17:43:33,050 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/.tmp/ns/73692c8367864926b0e35b74abd28e75 is 43, key is default/ns:d/1733939010852/Put/seqid=0 2024-12-11T17:43:33,053 WARN [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:33,053 WARN [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:33,058 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1893504459_22 at /127.0.0.1:53308 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:34533:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53308 dst: /127.0.0.1:34533 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:33,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-11T17:43:33,069 WARN [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
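Note on the recurring DFSStripedOutputStream warnings above: the test data directory is written with the RS-3-2-1024k erasure coding policy, which places 3 data blocks plus 2 parity blocks and therefore wants at least 5 datanodes per block group, while this mini-cluster runs only 3 datanodes (see the StartMiniClusterOption printed later with numDataNodes=3). The parity blocks at index 3 and 4 thus cannot be allocated and each block group is written degraded, which is also why the paired DataXceiver "Premature EOF" errors appear when the abandoned streamers are closed. A minimal sketch, assuming an HDFS client Configuration whose fs.defaultFS points at the test NameNode (the path and printout below are illustrative, not part of the test), of how one could confirm the policy and its datanode requirement programmatically, alongside the 'hdfs ec -verifyClusterSetup' check the warning suggests:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcPolicyCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();            // assumes fs.defaultFS points at the test NameNode
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path dir = new Path("/user/jenkins/test-data");     // hypothetical path, for illustration only
          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          if (policy != null) {
            // Data units + parity units = minimum datanodes needed for a fully placed block group.
            int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
            System.out.println(policy.getName() + " needs at least " + needed + " datanodes");
          } else {
            System.out.println("No erasure coding policy set on " + dir);
          }
        }
      }
    }

With RS-3-2-1024k this reports a requirement of 5 datanodes, so a 3-datanode mini-cluster keeps logging "Cannot allocate parity block" while the writes themselves still complete.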
2024-12-11T17:43:33,069 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/.tmp/ns/73692c8367864926b0e35b74abd28e75 2024-12-11T17:43:33,099 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/75744186b12a,38729,1733939007130 already deleted, retry=false 2024-12-11T17:43:33,099 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 75744186b12a,38729,1733939007130 expired; onlineServers=2 2024-12-11T17:43:33,099 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [75744186b12a,36797,1733939007319] 2024-12-11T17:43:33,105 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/.tmp/table/aa72044d59184d73b59ce3ac7a558044 is 52, key is TestHBaseWalOnEC/table:state/1733939011609/Put/seqid=0 2024-12-11T17:43:33,109 WARN [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:33,109 WARN [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:33,112 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/75744186b12a,36797,1733939007319 already deleted, retry=false 2024-12-11T17:43:33,112 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 75744186b12a,36797,1733939007319 expired; onlineServers=1 2024-12-11T17:43:33,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1893504459_22 at /127.0.0.1:49896 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:40997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49896 dst: /127.0.0.1:40997 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:33,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-11T17:43:33,128 WARN [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:33,128 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/.tmp/table/aa72044d59184d73b59ce3ac7a558044 2024-12-11T17:43:33,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T17:43:33,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36797-0x100160747060003, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T17:43:33,138 INFO [RS:2;75744186b12a:36797 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T17:43:33,138 INFO [RS:2;75744186b12a:36797 {}] regionserver.HRegionServer(1031): Exiting; stopping=75744186b12a,36797,1733939007319; zookeeper connection closed. 
2024-12-11T17:43:33,139 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@46959c7f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@46959c7f 2024-12-11T17:43:33,140 DEBUG [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-11T17:43:33,142 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/.tmp/info/901c6a986d114683b331a4f06741d958 as hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/info/901c6a986d114683b331a4f06741d958 2024-12-11T17:43:33,155 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/info/901c6a986d114683b331a4f06741d958, entries=10, sequenceid=11, filesize=6.5 K 2024-12-11T17:43:33,158 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/.tmp/ns/73692c8367864926b0e35b74abd28e75 as hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/ns/73692c8367864926b0e35b74abd28e75 2024-12-11T17:43:33,169 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/ns/73692c8367864926b0e35b74abd28e75, entries=2, sequenceid=11, filesize=5.0 K 2024-12-11T17:43:33,171 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/.tmp/table/aa72044d59184d73b59ce3ac7a558044 as hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/table/aa72044d59184d73b59ce3ac7a558044 2024-12-11T17:43:33,180 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/table/aa72044d59184d73b59ce3ac7a558044, entries=2, sequenceid=11, filesize=5.1 K 2024-12-11T17:43:33,182 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 241ms, sequenceid=11, compaction requested=false 2024-12-11T17:43:33,182 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-11T17:43:33,192 INFO [RS:0;75744186b12a:38729 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T17:43:33,192 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=Closed, path=null 2024-12-11T17:43:33,192 DEBUG [pool-65-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38729-0x100160747060001, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T17:43:33,192 INFO [RS:0;75744186b12a:38729 {}] regionserver.HRegionServer(1031): Exiting; stopping=75744186b12a,38729,1733939007130; zookeeper connection closed. 2024-12-11T17:43:33,195 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@32773d43 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@32773d43 2024-12-11T17:43:33,203 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-11T17:43:33,204 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-11T17:43:33,205 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T17:43:33,205 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733939012939Running coprocessor pre-close hooks at 1733939012939Disabling compacts and flushes for region at 1733939012939Disabling writes for close at 1733939012941 (+2 ms)Obtaining lock to block concurrent updates at 1733939012942 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1733939012942Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733939012943 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733939012946 (+3 ms)Flushing 1588230740/info: creating writer at 1733939012946Flushing 1588230740/info: appending metadata at 1733939012980 (+34 ms)Flushing 1588230740/info: closing flushed file at 1733939012980Flushing 1588230740/ns: creating writer at 1733939013025 (+45 ms)Flushing 1588230740/ns: appending metadata at 1733939013048 (+23 ms)Flushing 1588230740/ns: closing flushed file at 1733939013048Flushing 1588230740/table: creating writer at 1733939013081 (+33 ms)Flushing 1588230740/table: appending metadata at 1733939013104 (+23 ms)Flushing 1588230740/table: closing flushed file at 1733939013104Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@b7ff22f: reopening flushed file at 1733939013140 (+36 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@565a4729: reopening flushed file at 1733939013156 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@37423a94: reopening flushed file at 1733939013169 (+13 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 241ms, sequenceid=11, compaction requested=false at 1733939013182 (+13 ms)Writing region close event to WAL at 1733939013187 (+5 ms)Running coprocessor post-close hooks at 1733939013204 (+17 ms)Closed at 1733939013205 (+1 ms) 2024-12-11T17:43:33,205 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed 
hbase:meta,,1.1588230740 2024-12-11T17:43:33,340 INFO [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(976): stopping server 75744186b12a,40257,1733939007263; all regions closed. 2024-12-11T17:43:33,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741829_1019 (size=2751) 2024-12-11T17:43:33,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_1073741829_1019 (size=2751) 2024-12-11T17:43:33,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_1073741829_1019 (size=2751) 2024-12-11T17:43:33,348 DEBUG [RS:1;75744186b12a:40257 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/oldWALs 2024-12-11T17:43:33,348 INFO [RS:1;75744186b12a:40257 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 75744186b12a%2C40257%2C1733939007263.meta:.meta(num 1733939010675) 2024-12-11T17:43:33,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_1073741827_1015 (size=1298) 2024-12-11T17:43:33,351 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/WALs/75744186b12a,40257,1733939007263/75744186b12a%2C40257%2C1733939007263.1733939009980 not finished, retry = 0 2024-12-11T17:43:33,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741827_1015 (size=1298) 2024-12-11T17:43:33,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_1073741827_1015 (size=1298) 2024-12-11T17:43:33,456 DEBUG [RS:1;75744186b12a:40257 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/oldWALs 2024-12-11T17:43:33,456 INFO [RS:1;75744186b12a:40257 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 75744186b12a%2C40257%2C1733939007263:(num 1733939009980) 2024-12-11T17:43:33,456 DEBUG [RS:1;75744186b12a:40257 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:33,456 INFO [RS:1;75744186b12a:40257 {}] regionserver.LeaseManager(133): Closed leases 2024-12-11T17:43:33,456 INFO [RS:1;75744186b12a:40257 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T17:43:33,456 INFO [RS:1;75744186b12a:40257 {}] hbase.ChoreService(370): Chore service for: regionserver/75744186b12a:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-11T17:43:33,456 INFO [RS:1;75744186b12a:40257 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T17:43:33,456 INFO [regionserver/75744186b12a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-11T17:43:33,457 INFO [RS:1;75744186b12a:40257 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40257 2024-12-11T17:43:33,481 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T17:43:33,481 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/75744186b12a,40257,1733939007263 2024-12-11T17:43:33,481 INFO [RS:1;75744186b12a:40257 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T17:43:33,482 ERROR [pool-71-thread-1-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$370/0x00007f7c748f4990@6e1d3f0d rejected from java.util.concurrent.ThreadPoolExecutor@2b1fbcaa[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-12-11T17:43:33,515 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [75744186b12a,40257,1733939007263] 2024-12-11T17:43:33,523 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/75744186b12a,40257,1733939007263 already deleted, retry=false 2024-12-11T17:43:33,524 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 75744186b12a,40257,1733939007263 expired; onlineServers=0 2024-12-11T17:43:33,524 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '75744186b12a,33505,1733939006308' ***** 2024-12-11T17:43:33,524 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-11T17:43:33,524 INFO [M:0;75744186b12a:33505 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-11T17:43:33,524 INFO [M:0;75744186b12a:33505 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-11T17:43:33,524 DEBUG [M:0;75744186b12a:33505 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-11T17:43:33,524 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-11T17:43:33,524 DEBUG [M:0;75744186b12a:33505 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-11T17:43:33,524 DEBUG [master/75744186b12a:0:becomeActiveMaster-HFileCleaner.large.0-1733939009525 {}] cleaner.HFileCleaner(306): Exit Thread[master/75744186b12a:0:becomeActiveMaster-HFileCleaner.large.0-1733939009525,5,FailOnTimeoutGroup] 2024-12-11T17:43:33,524 DEBUG [master/75744186b12a:0:becomeActiveMaster-HFileCleaner.small.0-1733939009531 {}] cleaner.HFileCleaner(306): Exit Thread[master/75744186b12a:0:becomeActiveMaster-HFileCleaner.small.0-1733939009531,5,FailOnTimeoutGroup] 2024-12-11T17:43:33,525 INFO [M:0;75744186b12a:33505 {}] hbase.ChoreService(370): Chore service for: master/75744186b12a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-11T17:43:33,525 INFO [M:0;75744186b12a:33505 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-11T17:43:33,525 DEBUG [M:0;75744186b12a:33505 {}] master.HMaster(1795): Stopping service threads 2024-12-11T17:43:33,525 INFO [M:0;75744186b12a:33505 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-11T17:43:33,525 INFO [M:0;75744186b12a:33505 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T17:43:33,526 INFO [M:0;75744186b12a:33505 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-11T17:43:33,526 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-11T17:43:33,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-11T17:43:33,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:33,532 DEBUG [M:0;75744186b12a:33505 {}] zookeeper.ZKUtil(347): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-11T17:43:33,532 WARN [M:0;75744186b12a:33505 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-11T17:43:33,533 INFO [M:0;75744186b12a:33505 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/.lastflushedseqids 2024-12-11T17:43:33,544 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:33,544 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-11T17:43:33,564 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:59328 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:34485:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59328 dst: /127.0.0.1:34485 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:33,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-11T17:43:33,574 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:33,575 INFO [M:0;75744186b12a:33505 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-11T17:43:33,575 INFO [M:0;75744186b12a:33505 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-11T17:43:33,575 DEBUG [M:0;75744186b12a:33505 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T17:43:33,575 INFO [M:0;75744186b12a:33505 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T17:43:33,575 DEBUG [M:0;75744186b12a:33505 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T17:43:33,575 DEBUG [M:0;75744186b12a:33505 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T17:43:33,575 DEBUG [M:0;75744186b12a:33505 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-11T17:43:33,576 INFO [M:0;75744186b12a:33505 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.80 KB heapSize=34.11 KB 2024-12-11T17:43:33,614 DEBUG [M:0;75744186b12a:33505 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc560ec60d8142839a48653cc3f5b5fa is 82, key is hbase:meta,,1/info:regioninfo/1733939010758/Put/seqid=0 2024-12-11T17:43:33,616 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T17:43:33,616 INFO [RS:1;75744186b12a:40257 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T17:43:33,616 DEBUG [pool-71-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40257-0x100160747060002, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T17:43:33,616 INFO [RS:1;75744186b12a:40257 {}] regionserver.HRegionServer(1031): Exiting; stopping=75744186b12a,40257,1733939007263; zookeeper connection closed. 2024-12-11T17:43:33,616 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1fd00ba7 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1fd00ba7 2024-12-11T17:43:33,616 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:33,616 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:33,617 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-11T17:43:33,623 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:59352 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:34485:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59352 dst: /127.0.0.1:34485 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:33,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-11T17:43:33,630 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:33,630 INFO [M:0;75744186b12a:33505 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc560ec60d8142839a48653cc3f5b5fa 2024-12-11T17:43:33,661 DEBUG [M:0;75744186b12a:33505 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a71820e97dc0455e937c187e910135d2 is 746, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733939011617/Put/seqid=0 2024-12-11T17:43:33,663 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:33,663 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:33,671 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:53342 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:34533:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53342 dst: /127.0.0.1:34533 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:33,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_-9223372036854775552_1037 (size=6437) 2024-12-11T17:43:33,676 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:33,677 INFO [M:0;75744186b12a:33505 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.12 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a71820e97dc0455e937c187e910135d2 2024-12-11T17:43:33,703 DEBUG [M:0;75744186b12a:33505 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d6299fcc15d84f2bbaf7fce6ccb2a9e9 is 69, key is 75744186b12a,36797,1733939007319/rs:state/1733939009584/Put/seqid=0 2024-12-11T17:43:33,708 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:33,708 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-11T17:43:33,711 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_2057258307_22 at /127.0.0.1:49906 [Receiving block BP-1719711233-172.17.0.2-1733939001831:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:40997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49906 dst: /127.0.0.1:40997 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-11T17:43:33,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-11T17:43:33,728 WARN [M:0;75744186b12a:33505 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-11T17:43:33,728 INFO [M:0;75744186b12a:33505 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d6299fcc15d84f2bbaf7fce6ccb2a9e9 2024-12-11T17:43:33,738 DEBUG [M:0;75744186b12a:33505 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/dc560ec60d8142839a48653cc3f5b5fa as hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dc560ec60d8142839a48653cc3f5b5fa 2024-12-11T17:43:33,747 INFO [M:0;75744186b12a:33505 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/dc560ec60d8142839a48653cc3f5b5fa, entries=8, sequenceid=72, filesize=5.5 K 2024-12-11T17:43:33,749 DEBUG [M:0;75744186b12a:33505 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a71820e97dc0455e937c187e910135d2 as hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a71820e97dc0455e937c187e910135d2 2024-12-11T17:43:33,763 INFO [M:0;75744186b12a:33505 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a71820e97dc0455e937c187e910135d2, entries=8, sequenceid=72, filesize=6.3 K 2024-12-11T17:43:33,766 DEBUG [M:0;75744186b12a:33505 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d6299fcc15d84f2bbaf7fce6ccb2a9e9 as hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d6299fcc15d84f2bbaf7fce6ccb2a9e9 2024-12-11T17:43:33,780 INFO [M:0;75744186b12a:33505 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d6299fcc15d84f2bbaf7fce6ccb2a9e9, entries=3, sequenceid=72, filesize=5.2 K 2024-12-11T17:43:33,782 INFO [M:0;75744186b12a:33505 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.80 KB/27447, heapSize ~33.81 
KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 207ms, sequenceid=72, compaction requested=false 2024-12-11T17:43:33,792 INFO [M:0;75744186b12a:33505 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T17:43:33,792 DEBUG [M:0;75744186b12a:33505 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733939013575Disabling compacts and flushes for region at 1733939013575Disabling writes for close at 1733939013575Obtaining lock to block concurrent updates at 1733939013576 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733939013576Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27447, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733939013576Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733939013578 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733939013578Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733939013613 (+35 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733939013613Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733939013637 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733939013660 (+23 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733939013660Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733939013685 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733939013702 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733939013702Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56bb5f51: reopening flushed file at 1733939013737 (+35 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@546a3bf4: reopening flushed file at 1733939013747 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@695be41d: reopening flushed file at 1733939013764 (+17 ms)Finished flush of dataSize ~26.80 KB/27447, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 207ms, sequenceid=72, compaction requested=false at 1733939013782 (+18 ms)Writing region close event to WAL at 1733939013792 (+10 ms)Closed at 1733939013792 2024-12-11T17:43:33,797 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(650): complete file /user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/MasterData/WALs/75744186b12a,33505,1733939006308/75744186b12a%2C33505%2C1733939006308.1733939008327 not finished, retry = 0 2024-12-11T17:43:33,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34533 is added to blk_1073741825_1011 (size=32650) 2024-12-11T17:43:33,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40997 is added to blk_1073741825_1011 (size=32650) 2024-12-11T17:43:33,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741825_1011 (size=32650) 2024-12-11T17:43:33,899 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-11T17:43:33,899 INFO [M:0;75744186b12a:33505 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-11T17:43:33,899 INFO [M:0;75744186b12a:33505 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33505 2024-12-11T17:43:33,900 INFO [M:0;75744186b12a:33505 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-11T17:43:34,021 INFO [M:0;75744186b12a:33505 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-11T17:43:34,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T17:43:34,021 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33505-0x100160747060000, quorum=127.0.0.1:52280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-11T17:43:34,074 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@554ba3d5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T17:43:34,077 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@64a37729{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T17:43:34,078 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T17:43:34,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3891561d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T17:43:34,078 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1646e48a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.log.dir/,STOPPED} 2024-12-11T17:43:34,083 WARN [BP-1719711233-172.17.0.2-1733939001831 heartbeating to localhost/127.0.0.1:45685 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T17:43:34,083 WARN [BP-1719711233-172.17.0.2-1733939001831 heartbeating to localhost/127.0.0.1:45685 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1719711233-172.17.0.2-1733939001831 (Datanode Uuid 7c15638e-fa93-42a2-8967-150d2dbd251b) service to localhost/127.0.0.1:45685 2024-12-11T17:43:34,085 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data5/current/BP-1719711233-172.17.0.2-1733939001831 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T17:43:34,085 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data6/current/BP-1719711233-172.17.0.2-1733939001831 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T17:43:34,086 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T17:43:34,086 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T17:43:34,086 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T17:43:34,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1327a94d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T17:43:34,092 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@674554fc{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T17:43:34,092 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T17:43:34,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19093484{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T17:43:34,092 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@56f2bf79{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.log.dir/,STOPPED} 2024-12-11T17:43:34,095 WARN [BP-1719711233-172.17.0.2-1733939001831 heartbeating to localhost/127.0.0.1:45685 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T17:43:34,095 WARN [BP-1719711233-172.17.0.2-1733939001831 heartbeating to localhost/127.0.0.1:45685 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1719711233-172.17.0.2-1733939001831 (Datanode Uuid 318ea0c6-a00f-47ab-827b-a89609a8569a) service to localhost/127.0.0.1:45685 2024-12-11T17:43:34,096 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data3/current/BP-1719711233-172.17.0.2-1733939001831 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T17:43:34,096 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data4/current/BP-1719711233-172.17.0.2-1733939001831 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T17:43:34,096 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T17:43:34,096 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T17:43:34,097 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T17:43:34,104 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ec777b6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T17:43:34,104 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@145f251e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T17:43:34,105 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T17:43:34,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e9e5394{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T17:43:34,105 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5a55babc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.log.dir/,STOPPED} 2024-12-11T17:43:34,108 WARN [BP-1719711233-172.17.0.2-1733939001831 heartbeating to localhost/127.0.0.1:45685 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-11T17:43:34,108 WARN [BP-1719711233-172.17.0.2-1733939001831 heartbeating to localhost/127.0.0.1:45685 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1719711233-172.17.0.2-1733939001831 (Datanode Uuid 67c57c5e-8889-44b3-ae38-f932490a6922) service to localhost/127.0.0.1:45685 2024-12-11T17:43:34,109 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data1/current/BP-1719711233-172.17.0.2-1733939001831 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T17:43:34,109 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/cluster_60d764bd-4ab5-6f58-35db-ad52b08dad0c/data/data2/current/BP-1719711233-172.17.0.2-1733939001831 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-11T17:43:34,109 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-11T17:43:34,109 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-11T17:43:34,110 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-11T17:43:34,124 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44270346{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T17:43:34,125 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11292817{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-11T17:43:34,125 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-11T17:43:34,126 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2566da3f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-11T17:43:34,126 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@346b353e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.log.dir/,STOPPED} 2024-12-11T17:43:34,141 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-11T17:43:34,178 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-11T17:43:34,186 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=86 (was 156), OpenFileDescriptor=445 (was 391) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=608 (was 647), ProcessCount=11 (was 11), AvailableMemoryMB=4122 (was 2975) - AvailableMemoryMB LEAK? 
- 2024-12-11T17:43:34,192 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=86, OpenFileDescriptor=445, MaxFileDescriptor=1048576, SystemLoadAverage=608, ProcessCount=11, AvailableMemoryMB=4122 2024-12-11T17:43:34,192 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-11T17:43:34,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.log.dir so I do NOT create it in target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2 2024-12-11T17:43:34,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d513cc93-9965-82e7-f63b-494c8bfda862/hadoop.tmp.dir so I do NOT create it in target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2 2024-12-11T17:43:34,193 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656, deleteOnExit=true 2024-12-11T17:43:34,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-11T17:43:34,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/test.cache.data in system properties and HBase conf 2024-12-11T17:43:34,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/hadoop.tmp.dir in system properties and HBase conf 2024-12-11T17:43:34,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/hadoop.log.dir in system properties and HBase conf 2024-12-11T17:43:34,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-11T17:43:34,193 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-11T17:43:34,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-11T17:43:34,194 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-11T17:43:34,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-11T17:43:34,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-11T17:43:34,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-11T17:43:34,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T17:43:34,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-11T17:43:34,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-11T17:43:34,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-11T17:43:34,194 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T17:43:34,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-11T17:43:34,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/nfs.dump.dir in system properties and HBase conf 2024-12-11T17:43:34,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/java.io.tmpdir in system properties and HBase conf 2024-12-11T17:43:34,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-11T17:43:34,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-11T17:43:34,195 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-11T17:43:34,675 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T17:43:34,683 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T17:43:34,687 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T17:43:34,688 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T17:43:34,688 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T17:43:34,689 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T17:43:34,689 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bb5145b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/hadoop.log.dir/,AVAILABLE} 2024-12-11T17:43:34,690 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d625937{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T17:43:34,810 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4ef2d06{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/java.io.tmpdir/jetty-localhost-43241-hadoop-hdfs-3_4_1-tests_jar-_-any-21075555207907746/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-11T17:43:34,811 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ce28da4{HTTP/1.1, (http/1.1)}{localhost:43241} 2024-12-11T17:43:34,811 INFO [Time-limited test {}] server.Server(415): Started @15496ms 2024-12-11T17:43:35,086 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T17:43:35,090 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T17:43:35,091 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T17:43:35,091 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T17:43:35,091 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T17:43:35,092 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10b050c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/hadoop.log.dir/,AVAILABLE} 2024-12-11T17:43:35,093 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b7d65bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T17:43:35,223 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@165ba57{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/java.io.tmpdir/jetty-localhost-36855-hadoop-hdfs-3_4_1-tests_jar-_-any-10721694610054696905/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T17:43:35,223 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@1ed60080{HTTP/1.1, (http/1.1)}{localhost:36855} 2024-12-11T17:43:35,223 INFO [Time-limited test {}] server.Server(415): Started @15908ms 2024-12-11T17:43:35,226 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T17:43:35,283 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T17:43:35,286 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T17:43:35,287 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T17:43:35,287 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T17:43:35,287 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-11T17:43:35,287 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e4808da{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/hadoop.log.dir/,AVAILABLE} 2024-12-11T17:43:35,288 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@433bb18e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T17:43:35,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a6698d9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/java.io.tmpdir/jetty-localhost-43085-hadoop-hdfs-3_4_1-tests_jar-_-any-9239072335843545437/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T17:43:35,391 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@627fdc4a{HTTP/1.1, (http/1.1)}{localhost:43085} 2024-12-11T17:43:35,392 INFO [Time-limited test {}] server.Server(415): Started @16076ms 2024-12-11T17:43:35,394 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T17:43:35,428 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-11T17:43:35,431 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-11T17:43:35,432 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-11T17:43:35,432 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-11T17:43:35,432 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-11T17:43:35,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34635d45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/hadoop.log.dir/,AVAILABLE} 2024-12-11T17:43:35,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f79a540{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-11T17:43:35,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30ad1b96{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/java.io.tmpdir/jetty-localhost-34627-hadoop-hdfs-3_4_1-tests_jar-_-any-16143417806367451541/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-11T17:43:35,570 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1ec884b9{HTTP/1.1, (http/1.1)}{localhost:34627} 2024-12-11T17:43:35,570 INFO [Time-limited test {}] server.Server(415): Started @16255ms 2024-12-11T17:43:35,571 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-11T17:43:35,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T17:43:35,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T17:43:35,721 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T17:43:36,225 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-11T17:43:36,235 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T17:43:36,280 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T17:43:36,281 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-11T17:43:37,054 WARN [Thread-562 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data1/current/BP-1366890980-172.17.0.2-1733939014221/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:37,055 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data2/current/BP-1366890980-172.17.0.2-1733939014221/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:37,080 WARN [Thread-501 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T17:43:37,103 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd5d035c3bdf1ad30 with lease ID 0x7c03e45fec8d75a1: Processing first storage report for DS-82640175-2f6d-4cb9-96ba-50b6bc31e343 from datanode DatanodeRegistration(127.0.0.1:44279, datanodeUuid=ae240149-aa00-4dfd-9236-4ebaecc5f02c, infoPort=33955, infoSecurePort=0, ipcPort=35235, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221) 2024-12-11T17:43:37,103 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd5d035c3bdf1ad30 with lease ID 0x7c03e45fec8d75a1: from storage DS-82640175-2f6d-4cb9-96ba-50b6bc31e343 node DatanodeRegistration(127.0.0.1:44279, datanodeUuid=ae240149-aa00-4dfd-9236-4ebaecc5f02c, infoPort=33955, infoSecurePort=0, ipcPort=35235, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T17:43:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd5d035c3bdf1ad30 with lease ID 0x7c03e45fec8d75a1: Processing first storage report for DS-af9631f5-cf9e-4e77-add4-dc3a424baced from datanode DatanodeRegistration(127.0.0.1:44279, datanodeUuid=ae240149-aa00-4dfd-9236-4ebaecc5f02c, infoPort=33955, infoSecurePort=0, ipcPort=35235, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221) 2024-12-11T17:43:37,111 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd5d035c3bdf1ad30 with lease ID 0x7c03e45fec8d75a1: from storage DS-af9631f5-cf9e-4e77-add4-dc3a424baced node DatanodeRegistration(127.0.0.1:44279, datanodeUuid=ae240149-aa00-4dfd-9236-4ebaecc5f02c, infoPort=33955, infoSecurePort=0, ipcPort=35235, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T17:43:37,229 WARN [Thread-575 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data3/current/BP-1366890980-172.17.0.2-1733939014221/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:37,229 WARN [Thread-576 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data5/current/BP-1366890980-172.17.0.2-1733939014221/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:37,230 WARN [Thread-577 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data4/current/BP-1366890980-172.17.0.2-1733939014221/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:37,230 WARN [Thread-578 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data6/current/BP-1366890980-172.17.0.2-1733939014221/current, will proceed with Du for space computation calculation, 2024-12-11T17:43:37,257 WARN [Thread-546 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T17:43:37,259 WARN [Thread-524 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-11T17:43:37,262 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x897ad167d66d2891 with lease ID 0x7c03e45fec8d75a3: Processing first storage report for DS-cc593d13-9f62-419a-bf78-19f81d8de2cb from datanode DatanodeRegistration(127.0.0.1:35363, datanodeUuid=886a14d1-e86f-40f9-95e2-b471c33d681e, infoPort=36227, infoSecurePort=0, ipcPort=35229, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221) 2024-12-11T17:43:37,262 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x897ad167d66d2891 with lease ID 0x7c03e45fec8d75a3: from storage DS-cc593d13-9f62-419a-bf78-19f81d8de2cb node DatanodeRegistration(127.0.0.1:35363, datanodeUuid=886a14d1-e86f-40f9-95e2-b471c33d681e, infoPort=36227, infoSecurePort=0, ipcPort=35229, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T17:43:37,262 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4eaf3a15489023e8 with lease ID 0x7c03e45fec8d75a2: Processing first storage report for DS-e023a437-c7ea-45ec-9840-2f9321c28142 from datanode DatanodeRegistration(127.0.0.1:38503, datanodeUuid=ffd3cbc3-7d51-40a2-85e1-a9ab266b7467, infoPort=39915, infoSecurePort=0, ipcPort=39035, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221) 2024-12-11T17:43:37,262 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4eaf3a15489023e8 with lease ID 0x7c03e45fec8d75a2: from storage DS-e023a437-c7ea-45ec-9840-2f9321c28142 node DatanodeRegistration(127.0.0.1:38503, datanodeUuid=ffd3cbc3-7d51-40a2-85e1-a9ab266b7467, infoPort=39915, infoSecurePort=0, ipcPort=39035, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T17:43:37,262 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 
0x897ad167d66d2891 with lease ID 0x7c03e45fec8d75a3: Processing first storage report for DS-ba395290-2243-4593-9482-0ede9bd79e53 from datanode DatanodeRegistration(127.0.0.1:35363, datanodeUuid=886a14d1-e86f-40f9-95e2-b471c33d681e, infoPort=36227, infoSecurePort=0, ipcPort=35229, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221) 2024-12-11T17:43:37,262 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x897ad167d66d2891 with lease ID 0x7c03e45fec8d75a3: from storage DS-ba395290-2243-4593-9482-0ede9bd79e53 node DatanodeRegistration(127.0.0.1:35363, datanodeUuid=886a14d1-e86f-40f9-95e2-b471c33d681e, infoPort=36227, infoSecurePort=0, ipcPort=35229, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T17:43:37,262 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4eaf3a15489023e8 with lease ID 0x7c03e45fec8d75a2: Processing first storage report for DS-2633d653-1ad2-44ab-a3bb-b17bad49cc7e from datanode DatanodeRegistration(127.0.0.1:38503, datanodeUuid=ffd3cbc3-7d51-40a2-85e1-a9ab266b7467, infoPort=39915, infoSecurePort=0, ipcPort=39035, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221) 2024-12-11T17:43:37,262 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4eaf3a15489023e8 with lease ID 0x7c03e45fec8d75a2: from storage DS-2633d653-1ad2-44ab-a3bb-b17bad49cc7e node DatanodeRegistration(127.0.0.1:38503, datanodeUuid=ffd3cbc3-7d51-40a2-85e1-a9ab266b7467, infoPort=39915, infoSecurePort=0, ipcPort=39035, storageInfo=lv=-57;cid=testClusterID;nsid=1218621026;c=1733939014221), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-11T17:43:37,327 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2 2024-12-11T17:43:37,332 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/zookeeper_0, clientPort=63672, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-11T17:43:37,333 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63672 2024-12-11T17:43:37,333 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:37,335 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:37,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741825_1001 (size=7) 2024-12-11T17:43:37,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741825_1001 (size=7) 2024-12-11T17:43:37,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741825_1001 (size=7) 2024-12-11T17:43:37,361 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762 with version=8 2024-12-11T17:43:37,361 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:45685/user/jenkins/test-data/27d87298-8e2b-a21f-d0b1-45cb3077f962/hbase-staging 2024-12-11T17:43:37,364 INFO [Time-limited test {}] client.ConnectionUtils(128): master/75744186b12a:0 server-side Connection retries=45 2024-12-11T17:43:37,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:37,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:37,364 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T17:43:37,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:37,364 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T17:43:37,364 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-11T17:43:37,365 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T17:43:37,365 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42897 2024-12-11T17:43:37,367 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42897 connecting to ZooKeeper ensemble=127.0.0.1:63672 2024-12-11T17:43:37,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:428970x0, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T17:43:37,422 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42897-0x1001607750e0000 connected 2024-12-11T17:43:37,505 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:37,507 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:37,510 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:37,511 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762, hbase.cluster.distributed=false 2024-12-11T17:43:37,513 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T17:43:37,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42897 2024-12-11T17:43:37,519 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42897 2024-12-11T17:43:37,520 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42897 2024-12-11T17:43:37,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42897 2024-12-11T17:43:37,522 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42897 2024-12-11T17:43:37,537 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/75744186b12a:0 server-side Connection retries=45 2024-12-11T17:43:37,537 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:37,537 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:37,537 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T17:43:37,537 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:37,537 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T17:43:37,537 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T17:43:37,537 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T17:43:37,539 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45861 2024-12-11T17:43:37,540 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45861 connecting to ZooKeeper ensemble=127.0.0.1:63672 
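The RPC server and ZooKeeper registration entries above are the server side of what a read/write test exercises. Purely as an illustration (the table name, family, row and value below are made up, not taken from TestHBaseWalOnEC), a client round-trip against this mini cluster's ZooKeeper quorum on client port 63672 would look roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.*;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadWriteSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "63672"); // clientPort logged by MiniZooKeeperCluster above

        TableName tn = TableName.valueOf("sketchTable"); // hypothetical table
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Create the table on first use so the put/get below has somewhere to go.
          if (!admin.tableExists(tn)) {
            admin.createTable(TableDescriptorBuilder.newBuilder(tn)
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
                .build());
          }
          try (Table table = conn.getTable(tn)) {
            table.put(new Put(Bytes.toBytes("row1"))
                .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value")));
            Result result = table.get(new Get(Bytes.toBytes("row1")));
            System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("q"))));
          }
        }
      }
    }
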
2024-12-11T17:43:37,542 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:37,544 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:37,555 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:458610x0, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T17:43:37,556 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45861-0x1001607750e0001 connected 2024-12-11T17:43:37,560 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:37,560 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T17:43:37,564 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T17:43:37,565 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T17:43:37,567 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T17:43:37,571 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45861 2024-12-11T17:43:37,571 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45861 2024-12-11T17:43:37,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45861 2024-12-11T17:43:37,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45861 2024-12-11T17:43:37,572 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45861 2024-12-11T17:43:37,586 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/75744186b12a:0 server-side Connection retries=45 2024-12-11T17:43:37,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:37,586 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:37,586 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T17:43:37,587 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, 
handlerCount=3 2024-12-11T17:43:37,587 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T17:43:37,587 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T17:43:37,587 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T17:43:37,587 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43475 2024-12-11T17:43:37,589 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43475 connecting to ZooKeeper ensemble=127.0.0.1:63672 2024-12-11T17:43:37,589 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:37,592 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:37,605 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:434750x0, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T17:43:37,605 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43475-0x1001607750e0002 connected 2024-12-11T17:43:37,605 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:37,606 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T17:43:37,607 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T17:43:37,607 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T17:43:37,609 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-11T17:43:37,610 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43475 2024-12-11T17:43:37,610 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43475 2024-12-11T17:43:37,610 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43475 2024-12-11T17:43:37,611 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43475 2024-12-11T17:43:37,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43475 2024-12-11T17:43:37,631 INFO [Time-limited test 
{}] client.ConnectionUtils(128): regionserver/75744186b12a:0 server-side Connection retries=45 2024-12-11T17:43:37,632 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:37,632 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:37,632 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-11T17:43:37,632 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-11T17:43:37,632 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-11T17:43:37,632 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-11T17:43:37,632 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-11T17:43:37,633 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46677 2024-12-11T17:43:37,634 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46677 connecting to ZooKeeper ensemble=127.0.0.1:63672 2024-12-11T17:43:37,635 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:37,637 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:37,647 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:466770x0, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-11T17:43:37,648 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46677-0x1001607750e0003 connected 2024-12-11T17:43:37,648 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:37,648 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-11T17:43:37,650 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-11T17:43:37,651 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-11T17:43:37,653 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Set 
watcher on znode that does not yet exist, /hbase/acl 2024-12-11T17:43:37,654 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46677 2024-12-11T17:43:37,655 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46677 2024-12-11T17:43:37,659 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46677 2024-12-11T17:43:37,664 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46677 2024-12-11T17:43:37,665 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46677 2024-12-11T17:43:37,682 DEBUG [M:0;75744186b12a:42897 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;75744186b12a:42897 2024-12-11T17:43:37,682 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/75744186b12a,42897,1733939017363 2024-12-11T17:43:37,705 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:37,705 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:37,705 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:37,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:37,707 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/75744186b12a,42897,1733939017363 2024-12-11T17:43:37,753 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T17:43:37,753 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T17:43:37,753 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:37,753 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-11T17:43:37,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:37,755 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-11T17:43:37,755 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:37,757 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-11T17:43:37,758 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/75744186b12a,42897,1733939017363 from backup master directory 2024-12-11T17:43:37,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/75744186b12a,42897,1733939017363 2024-12-11T17:43:37,771 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:37,771 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:37,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:37,771 WARN [master/75744186b12a:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
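The ZKWatcher traffic above is the master-election pattern: each process sets watches on znodes that may not exist yet (/hbase/master, /hbase/backup-masters) and reacts to NodeCreated, NodeDeleted and NodeChildrenChanged events. A stripped-down sketch of that pattern with the plain ZooKeeper client follows; the znode paths are taken from the log, everything else is illustrative.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            System.out.println("event " + event.getType() + " on " + event.getPath());

        // 63672 is the MiniZooKeeperCluster client port from the log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:63672", 30000, watcher);

        // exists() registers a watch even when the znode is not there yet;
        // this is the "Set watcher on znode that does not yet exist" case in the log.
        zk.exists("/hbase/master", true);

        // getChildren() fires NodeChildrenChanged when backup masters register or deregister
        // (it throws NoNodeException if the parent znode is absent).
        zk.getChildren("/hbase/backup-masters", true);

        Thread.sleep(10_000); // keep the session open long enough to observe events
        zk.close();
      }
    }
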
2024-12-11T17:43:37,771 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=75744186b12a,42897,1733939017363 2024-12-11T17:43:37,773 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-11T17:43:37,784 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/hbase.id] with ID: df468425-a391-440b-9948-a3e4ba242675 2024-12-11T17:43:37,784 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/.tmp/hbase.id 2024-12-11T17:43:37,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741826_1002 (size=42) 2024-12-11T17:43:37,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741826_1002 (size=42) 2024-12-11T17:43:37,810 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/.tmp/hbase.id]:[hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/hbase.id] 2024-12-11T17:43:37,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741826_1002 (size=42) 2024-12-11T17:43:37,853 INFO [master/75744186b12a:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-11T17:43:37,853 INFO [master/75744186b12a:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-11T17:43:37,858 INFO [master/75744186b12a:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 5ms. 
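The cluster ID entries above use the standard write-to-temp-then-rename idiom: the ID is written to .tmp/hbase.id and then moved to hbase.id, so readers never observe a half-written file. A generic sketch of that idiom with the Hadoop FileSystem API (the paths are placeholders and this is not the actual FSUtils code):

    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TempThenRenameSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://localhost:40023"); // namenode address from the log
        FileSystem fs = FileSystem.get(conf);

        Path tmp = new Path("/user/jenkins/test-data/example/.tmp/hbase.id"); // placeholder paths
        Path dst = new Path("/user/jenkins/test-data/example/hbase.id");

        // 1. Write the content to a temporary location.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.write("df468425-a391-440b-9948-a3e4ba242675".getBytes(StandardCharsets.UTF_8));
        }
        // 2. Rename into place; within one HDFS namespace rename is atomic, so the
        //    target either does not exist or is complete.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }
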
2024-12-11T17:43:37,888 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:37,888 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:37,888 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:37,888 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:37,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741827_1003 (size=196) 2024-12-11T17:43:37,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741827_1003 (size=196) 2024-12-11T17:43:37,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741827_1003 (size=196) 2024-12-11T17:43:37,924 INFO [master/75744186b12a:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T17:43:37,925 INFO [master/75744186b12a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-11T17:43:37,925 INFO [master/75744186b12a:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T17:43:37,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is 
added to blk_1073741828_1004 (size=1189) 2024-12-11T17:43:37,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741828_1004 (size=1189) 2024-12-11T17:43:37,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741828_1004 (size=1189) 2024-12-11T17:43:37,943 INFO [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store 2024-12-11T17:43:37,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741829_1005 (size=34) 2024-12-11T17:43:37,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741829_1005 (size=34) 2024-12-11T17:43:37,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741829_1005 (size=34) 2024-12-11T17:43:37,962 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:37,962 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-11T17:43:37,962 INFO [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T17:43:37,962 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
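The table descriptor printed above for the local master:store region (families info, proc, rs, state with their VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING and BLOCKSIZE settings) has the same structure that the public client API builds. A hedged sketch of two of those families using TableDescriptorBuilder and ColumnFamilyDescriptorBuilder; the table name here is hypothetical, since the real master:store region is created internally by the master rather than through the client:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: a descriptor shaped like the 'info' and 'proc' families shown above.
public class StoreDescriptorSketch {
  public static void main(String[] args) {
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("store_demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                  // VERSIONS => '3'
            .setInMemory(true)                                  // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                             // BLOCKSIZE => 8KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                  // VERSIONS => '1'
            .setBlocksize(64 * 1024)                            // BLOCKSIZE => 64KB
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
    System.out.println(td);   // prints a descriptor string similar to the log output
  }
}
```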
2024-12-11T17:43:37,962 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-11T17:43:37,962 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T17:43:37,962 INFO [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-11T17:43:37,962 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733939017962Disabling compacts and flushes for region at 1733939017962Disabling writes for close at 1733939017962Writing region close event to WAL at 1733939017962Closed at 1733939017962 2024-12-11T17:43:37,963 WARN [master/75744186b12a:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/.initializing 2024-12-11T17:43:37,963 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/WALs/75744186b12a,42897,1733939017363 2024-12-11T17:43:37,967 INFO [master/75744186b12a:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=75744186b12a%2C42897%2C1733939017363, suffix=, logDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/WALs/75744186b12a,42897,1733939017363, archiveDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/oldWALs, maxLogs=10 2024-12-11T17:43:37,968 INFO [master/75744186b12a:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 75744186b12a%2C42897%2C1733939017363.1733939017967 2024-12-11T17:43:37,979 INFO [master/75744186b12a:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/WALs/75744186b12a,42897,1733939017363/75744186b12a%2C42897%2C1733939017363.1733939017967 2024-12-11T17:43:37,984 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36227:36227),(127.0.0.1/127.0.0.1:39915:39915),(127.0.0.1/127.0.0.1:33955:33955)] 2024-12-11T17:43:37,984 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-11T17:43:37,985 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:37,985 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:37,985 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:37,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:37,989 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-11T17:43:37,989 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:37,990 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:37,990 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:37,992 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-11T17:43:37,992 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:37,992 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T17:43:37,993 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:37,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-11T17:43:37,995 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:37,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T17:43:37,996 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:37,998 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-11T17:43:37,998 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:37,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T17:43:37,999 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:38,000 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:38,002 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:38,003 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:38,003 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:38,004 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T17:43:38,006 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-11T17:43:38,009 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T17:43:38,010 INFO [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68200564, jitterRate=0.016267597675323486}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T17:43:38,011 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733939017985Initializing all the Stores at 1733939017986 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939017986Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939017986Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939017986Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939017986Cleaning up temporary data from old regions at 1733939018003 (+17 ms)Region opened successfully at 1733939018011 (+8 ms) 2024-12-11T17:43:38,012 INFO [master/75744186b12a:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-11T17:43:38,020 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1418eb7d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=75744186b12a/172.17.0.2:0 2024-12-11T17:43:38,022 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-11T17:43:38,022 INFO [master/75744186b12a:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-11T17:43:38,022 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-11T17:43:38,023 INFO [master/75744186b12a:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-11T17:43:38,024 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-11T17:43:38,024 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-11T17:43:38,025 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-11T17:43:38,028 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
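The AbstractRpcClient line above reports connectTO=10000, readTO=20000, writeTO=60000. A sketch of how such client socket timeouts are typically tuned through the Configuration; the property names below are my assumption, chosen because they match the logged defaults of 10s/20s/60s, and are not confirmed by this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: property names are assumed, values mirror the logged defaults.
public class RpcTimeoutConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.ipc.client.socket.timeout.connect", 10_000); // connectTO
    conf.setInt("hbase.ipc.client.socket.timeout.read",    20_000); // readTO
    conf.setInt("hbase.ipc.client.socket.timeout.write",   60_000); // writeTO
    System.out.println("readTO = "
        + conf.getInt("hbase.ipc.client.socket.timeout.read", -1) + " ms");
  }
}
```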
2024-12-11T17:43:38,029 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-11T17:43:38,046 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-11T17:43:38,047 INFO [master/75744186b12a:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-11T17:43:38,048 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-11T17:43:38,054 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-11T17:43:38,055 INFO [master/75744186b12a:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-11T17:43:38,057 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-11T17:43:38,063 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-11T17:43:38,064 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-11T17:43:38,071 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-11T17:43:38,074 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-11T17:43:38,079 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-11T17:43:38,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:38,088 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:38,088 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:38,088 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-11T17:43:38,088 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,088 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,088 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,089 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=75744186b12a,42897,1733939017363, sessionid=0x1001607750e0000, setting cluster-up flag (Was=false) 2024-12-11T17:43:38,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,104 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,105 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,105 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,129 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-11T17:43:38,131 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=75744186b12a,42897,1733939017363 2024-12-11T17:43:38,146 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,146 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,146 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,171 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-11T17:43:38,173 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=75744186b12a,42897,1733939017363 2024-12-11T17:43:38,175 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-11T17:43:38,178 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-11T17:43:38,178 INFO [master/75744186b12a:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-11T17:43:38,178 INFO [master/75744186b12a:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-11T17:43:38,179 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 75744186b12a,42897,1733939017363 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-11T17:43:38,181 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/75744186b12a:0, corePoolSize=5, maxPoolSize=5 2024-12-11T17:43:38,181 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/75744186b12a:0, corePoolSize=5, maxPoolSize=5 2024-12-11T17:43:38,181 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/75744186b12a:0, corePoolSize=5, maxPoolSize=5 2024-12-11T17:43:38,182 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/75744186b12a:0, corePoolSize=5, maxPoolSize=5 2024-12-11T17:43:38,182 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/75744186b12a:0, corePoolSize=10, maxPoolSize=10 2024-12-11T17:43:38,182 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,182 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/75744186b12a:0, corePoolSize=2, maxPoolSize=2 2024-12-11T17:43:38,182 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,184 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733939048184 2024-12-11T17:43:38,184 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-11T17:43:38,184 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-11T17:43:38,184 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-11T17:43:38,184 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-11T17:43:38,184 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-11T17:43:38,184 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-11T17:43:38,184 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,185 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T17:43:38,185 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-11T17:43:38,185 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-11T17:43:38,185 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-11T17:43:38,185 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-11T17:43:38,185 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-11T17:43:38,185 INFO [master/75744186b12a:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-11T17:43:38,186 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/75744186b12a:0:becomeActiveMaster-HFileCleaner.large.0-1733939018186,5,FailOnTimeoutGroup] 2024-12-11T17:43:38,186 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/75744186b12a:0:becomeActiveMaster-HFileCleaner.small.0-1733939018186,5,FailOnTimeoutGroup] 2024-12-11T17:43:38,186 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,186 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-11T17:43:38,186 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,186 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
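The ChoreService entries above schedule named cleaners at fixed periods (LogsCleaner every 600000 ms, ReplicationBarrierCleaner every 43200000 ms, SnapshotCleaner every 1800000 ms). The same pattern, sketched with a plain JDK ScheduledExecutorService instead of HBase's ChoreService:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Not HBase's ChoreService: a JDK sketch of a periodic cleaner chore.
public class ChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
    Runnable logsCleaner = () ->
        System.out.println("LogsCleaner chore: scanning oldWALs for expired files...");
    // period=600000 ms, matching the LogsCleaner entry above
    chorePool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);
  }
}
```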
2024-12-11T17:43:38,186 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:38,187 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-11T17:43:38,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741831_1007 (size=1321) 2024-12-11T17:43:38,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741831_1007 (size=1321) 2024-12-11T17:43:38,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741831_1007 (size=1321) 2024-12-11T17:43:38,200 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-11T17:43:38,200 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762 2024-12-11T17:43:38,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741832_1008 (size=32) 2024-12-11T17:43:38,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741832_1008 (size=32) 2024-12-11T17:43:38,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741832_1008 (size=32) 2024-12-11T17:43:38,216 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:38,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T17:43:38,219 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T17:43:38,219 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:38,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:38,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T17:43:38,222 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T17:43:38,222 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:38,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:38,223 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T17:43:38,225 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T17:43:38,225 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:38,225 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:38,226 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T17:43:38,228 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T17:43:38,228 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:38,229 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:38,230 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T17:43:38,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740 2024-12-11T17:43:38,231 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740 2024-12-11T17:43:38,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T17:43:38,234 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T17:43:38,235 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T17:43:38,236 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T17:43:38,239 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T17:43:38,240 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70412336, jitterRate=0.049225568771362305}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T17:43:38,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733939018216Initializing all the Stores at 1733939018217 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939018217Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939018218 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939018218Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939018218Cleaning up temporary data from old regions at 1733939018234 (+16 ms)Region opened successfully at 1733939018241 (+7 ms) 2024-12-11T17:43:38,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-11T17:43:38,242 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-11T17:43:38,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-11T17:43:38,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-11T17:43:38,242 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-11T17:43:38,242 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-11T17:43:38,243 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733939018242Disabling compacts and flushes for region at 1733939018242Disabling writes for close at 1733939018242Writing region close event to WAL at 1733939018242Closed at 1733939018242 2024-12-11T17:43:38,245 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T17:43:38,245 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-11T17:43:38,245 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-11T17:43:38,248 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T17:43:38,249 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-11T17:43:38,269 INFO [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer(746): ClusterId : df468425-a391-440b-9948-a3e4ba242675 2024-12-11T17:43:38,269 INFO [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(746): ClusterId : df468425-a391-440b-9948-a3e4ba242675 2024-12-11T17:43:38,269 DEBUG [RS:1;75744186b12a:43475 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T17:43:38,269 DEBUG [RS:0;75744186b12a:45861 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc 
initializing 2024-12-11T17:43:38,295 INFO [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(746): ClusterId : df468425-a391-440b-9948-a3e4ba242675 2024-12-11T17:43:38,295 DEBUG [RS:2;75744186b12a:46677 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-11T17:43:38,295 DEBUG [RS:0;75744186b12a:45861 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T17:43:38,295 DEBUG [RS:1;75744186b12a:43475 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T17:43:38,295 DEBUG [RS:0;75744186b12a:45861 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T17:43:38,295 DEBUG [RS:1;75744186b12a:43475 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T17:43:38,313 DEBUG [RS:2;75744186b12a:46677 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-11T17:43:38,313 DEBUG [RS:2;75744186b12a:46677 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-11T17:43:38,314 DEBUG [RS:1;75744186b12a:43475 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T17:43:38,314 DEBUG [RS:0;75744186b12a:45861 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T17:43:38,314 DEBUG [RS:1;75744186b12a:43475 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75cb280d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=75744186b12a/172.17.0.2:0 2024-12-11T17:43:38,314 DEBUG [RS:0;75744186b12a:45861 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8066ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=75744186b12a/172.17.0.2:0 2024-12-11T17:43:38,331 DEBUG [RS:2;75744186b12a:46677 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-11T17:43:38,331 DEBUG [RS:2;75744186b12a:46677 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4603f324, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=75744186b12a/172.17.0.2:0 2024-12-11T17:43:38,332 DEBUG [RS:0;75744186b12a:45861 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;75744186b12a:45861 2024-12-11T17:43:38,332 DEBUG [RS:1;75744186b12a:43475 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;75744186b12a:43475 2024-12-11T17:43:38,332 INFO [RS:0;75744186b12a:45861 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T17:43:38,332 INFO [RS:0;75744186b12a:45861 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T17:43:38,332 INFO [RS:1;75744186b12a:43475 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T17:43:38,332 DEBUG [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-11T17:43:38,332 INFO [RS:1;75744186b12a:43475 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T17:43:38,332 DEBUG [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-11T17:43:38,336 INFO [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(2659): reportForDuty to master=75744186b12a,42897,1733939017363 with port=45861, startcode=1733939017536 2024-12-11T17:43:38,337 DEBUG [RS:0;75744186b12a:45861 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T17:43:38,339 INFO [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer(2659): reportForDuty to master=75744186b12a,42897,1733939017363 with port=43475, startcode=1733939017586 2024-12-11T17:43:38,339 DEBUG [RS:1;75744186b12a:43475 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T17:43:38,343 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38477, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T17:43:38,343 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42897 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 75744186b12a,43475,1733939017586 2024-12-11T17:43:38,343 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53561, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T17:43:38,344 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42897 {}] master.ServerManager(517): Registering regionserver=75744186b12a,43475,1733939017586 2024-12-11T17:43:38,346 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42897 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 75744186b12a,45861,1733939017536 2024-12-11T17:43:38,346 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42897 {}] master.ServerManager(517): Registering regionserver=75744186b12a,45861,1733939017536 2024-12-11T17:43:38,346 DEBUG [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762 2024-12-11T17:43:38,346 DEBUG [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40023 2024-12-11T17:43:38,346 DEBUG [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T17:43:38,349 DEBUG [RS:2;75744186b12a:46677 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;75744186b12a:46677 2024-12-11T17:43:38,349 INFO [RS:2;75744186b12a:46677 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-11T17:43:38,349 INFO [RS:2;75744186b12a:46677 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-11T17:43:38,349 DEBUG [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(832): About to register with Master. 
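The reportForDuty / "Registering regionserver" exchange above is what makes a server visible to the cluster. A minimal client-side sketch (assuming a reachable cluster and the public Admin API) that lists the same set of live, registered region servers:

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListRegionServers {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Every server that has completed reportForDuty shows up as a live server.
      for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        System.out.println(sn.getServerName()); // host,port,startcode
      }
    }
  }
}
```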
2024-12-11T17:43:38,349 DEBUG [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762 2024-12-11T17:43:38,349 DEBUG [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40023 2024-12-11T17:43:38,349 DEBUG [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T17:43:38,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T17:43:38,355 INFO [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(2659): reportForDuty to master=75744186b12a,42897,1733939017363 with port=46677, startcode=1733939017631 2024-12-11T17:43:38,355 DEBUG [RS:2;75744186b12a:46677 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-11T17:43:38,357 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43755, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-11T17:43:38,358 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42897 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 75744186b12a,46677,1733939017631 2024-12-11T17:43:38,358 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42897 {}] master.ServerManager(517): Registering regionserver=75744186b12a,46677,1733939017631 2024-12-11T17:43:38,361 DEBUG [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762 2024-12-11T17:43:38,361 DEBUG [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40023 2024-12-11T17:43:38,361 DEBUG [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-11T17:43:38,399 DEBUG [RS:1;75744186b12a:43475 {}] zookeeper.ZKUtil(111): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/75744186b12a,43475,1733939017586 2024-12-11T17:43:38,399 WARN [RS:1;75744186b12a:43475 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T17:43:38,399 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [75744186b12a,43475,1733939017586] 2024-12-11T17:43:38,399 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [75744186b12a,45861,1733939017536] 2024-12-11T17:43:38,399 INFO [RS:1;75744186b12a:43475 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T17:43:38,399 DEBUG [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/WALs/75744186b12a,43475,1733939017586 2024-12-11T17:43:38,400 WARN [75744186b12a:42897 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
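Registration is mirrored in ZooKeeper: each live region server holds an ephemeral child of /hbase/rs, and the master's watcher fires on NodeChildrenChanged, as the RegionServerTracker lines show. A minimal ZooKeeper sketch, reusing the quorum 127.0.0.1:63672 and base znode /hbase from the log, that watches the same path:

```java
import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsZNodeWatch {
  public static void main(String[] args) throws Exception {
    // Quorum taken from the log above.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63672", 30000, (WatchedEvent e) -> {});
    Watcher membershipWatcher = event -> {
      if (event.getType() == Watcher.Event.EventType.NodeChildrenChanged) {
        System.out.println("Region server set changed under " + event.getPath());
      }
    };
    // Each live region server holds an ephemeral child of /hbase/rs.
    List<String> servers = zk.getChildren("/hbase/rs", membershipWatcher);
    servers.forEach(System.out::println);
    zk.close();
  }
}
```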
2024-12-11T17:43:38,408 DEBUG [RS:2;75744186b12a:46677 {}] zookeeper.ZKUtil(111): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/75744186b12a,46677,1733939017631 2024-12-11T17:43:38,408 WARN [RS:2;75744186b12a:46677 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T17:43:38,408 INFO [RS:2;75744186b12a:46677 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T17:43:38,408 DEBUG [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/WALs/75744186b12a,46677,1733939017631 2024-12-11T17:43:38,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-11T17:43:38,409 DEBUG [RS:0;75744186b12a:45861 {}] zookeeper.ZKUtil(111): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/75744186b12a,45861,1733939017536 2024-12-11T17:43:38,409 WARN [RS:0;75744186b12a:45861 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-11T17:43:38,409 INFO [RS:0;75744186b12a:45861 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T17:43:38,409 DEBUG [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/WALs/75744186b12a,45861,1733939017536 2024-12-11T17:43:38,410 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [75744186b12a,46677,1733939017631] 2024-12-11T17:43:38,411 INFO [RS:1;75744186b12a:43475 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T17:43:38,415 INFO [RS:1;75744186b12a:43475 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T17:43:38,416 INFO [RS:2;75744186b12a:46677 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T17:43:38,419 INFO [RS:1;75744186b12a:43475 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T17:43:38,419 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
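The MemStoreFlusher and PressureAwareCompactionThroughputController lines report derived limits (globalMemStoreLimit=880 M with an 836 M low-water mark, compaction throughput bounded between 50 and 100 MB/second). A hedged sketch of the configuration keys that commonly drive those values; treat the exact key names as assumptions:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class FlushAndCompactionTuning {
  public static Configuration tuningConf() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the heap usable by all memstores, and the low-water mark
    // expressed as a fraction of that limit (836 M is roughly 0.95 of 880 M).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Compaction throughput bounds in bytes/second (100 MB/s and 50 MB/s in the log).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}
```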
2024-12-11T17:43:38,421 INFO [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T17:43:38,421 INFO [RS:2;75744186b12a:46677 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T17:43:38,422 INFO [RS:1;75744186b12a:43475 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T17:43:38,423 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,423 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,423 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,423 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,423 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,423 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,423 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/75744186b12a:0, corePoolSize=2, maxPoolSize=2 2024-12-11T17:43:38,423 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,423 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,424 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,424 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,424 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,424 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,424 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:38,424 DEBUG [RS:1;75744186b12a:43475 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:38,425 INFO [RS:2;75744186b12a:46677 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction 
throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T17:43:38,426 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,427 INFO [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T17:43:38,427 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,427 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,428 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,428 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,428 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,428 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,43475,1733939017586-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T17:43:38,429 INFO [RS:2;75744186b12a:46677 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T17:43:38,429 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
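The ChoreService entries register fixed-period background tasks (CompactionChecker and MemstoreFlusherChore every 1000 ms, CompactedHFilesCleaner every 120000 ms, and so on). Below is a plain-JDK analogue of that pattern, not HBase's own ChoreService API, just to show the scheduling shape:

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreLikeScheduler {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
    // Analogue of CompactionChecker: period=1000, unit=MILLISECONDS.
    chores.scheduleAtFixedRate(
        () -> System.out.println("check stores for compaction work"),
        1000, 1000, TimeUnit.MILLISECONDS);
    // Analogue of MemstoreFlusherChore: also every second.
    chores.scheduleAtFixedRate(
        () -> System.out.println("check memstores against flush thresholds"),
        1000, 1000, TimeUnit.MILLISECONDS);
    // Let the chores run briefly, then stop; a real ChoreService runs for the server's lifetime.
    try { Thread.sleep(5000); } finally { chores.shutdownNow(); }
  }
}
```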
2024-12-11T17:43:38,429 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,429 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,429 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,429 INFO [RS:0;75744186b12a:45861 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-11T17:43:38,430 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,430 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,430 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/75744186b12a:0, corePoolSize=2, maxPoolSize=2 2024-12-11T17:43:38,430 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,430 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,430 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,430 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,430 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,430 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,430 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:38,431 DEBUG [RS:2;75744186b12a:46677 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:38,436 INFO [RS:0;75744186b12a:45861 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-11T17:43:38,436 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,437 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-11T17:43:38,437 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,437 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,437 INFO [RS:0;75744186b12a:45861 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-11T17:43:38,437 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,437 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,46677,1733939017631-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T17:43:38,437 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,438 INFO [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-11T17:43:38,439 INFO [RS:0;75744186b12a:45861 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-11T17:43:38,439 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,440 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,440 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,440 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,440 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,440 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,440 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/75744186b12a:0, corePoolSize=2, maxPoolSize=2 2024-12-11T17:43:38,440 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,440 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,441 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,441 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service 
name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,441 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,441 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/75744186b12a:0, corePoolSize=1, maxPoolSize=1 2024-12-11T17:43:38,441 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:38,441 DEBUG [RS:0;75744186b12a:45861 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0, corePoolSize=3, maxPoolSize=3 2024-12-11T17:43:38,450 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,450 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,451 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,451 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,451 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,451 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,45861,1733939017536-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-11T17:43:38,455 INFO [RS:2;75744186b12a:46677 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T17:43:38,456 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,46677,1733939017631-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,456 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,456 INFO [RS:2;75744186b12a:46677 {}] regionserver.Replication(171): 75744186b12a,46677,1733939017631 started 2024-12-11T17:43:38,458 INFO [RS:1;75744186b12a:43475 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T17:43:38,458 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,43475,1733939017586-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,459 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,459 INFO [RS:1;75744186b12a:43475 {}] regionserver.Replication(171): 75744186b12a,43475,1733939017586 started 2024-12-11T17:43:38,474 INFO [RS:0;75744186b12a:45861 {}] regionserver.HeapMemoryManager(213): Starting, tuneOn=false 2024-12-11T17:43:38,474 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
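Each RS_* executor above is started with corePoolSize equal to maxPoolSize, i.e. a fixed-size pool per event type (1 thread for region open/close, 2 for log replay, 3 for snapshot and flush operations). A JDK sketch of the same sizing choice:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class RegionServerPools {
  public static void main(String[] args) {
    // corePoolSize=1, maxPoolSize=1 -> a single-threaded pool per event type.
    ExecutorService openRegion = Executors.newFixedThreadPool(1);
    // corePoolSize=3, maxPoolSize=3 for snapshot and flush operations.
    ExecutorService snapshotOps = Executors.newFixedThreadPool(3);
    openRegion.submit(() -> System.out.println("open region task"));
    snapshotOps.submit(() -> System.out.println("snapshot subtask"));
    openRegion.shutdown();
    snapshotOps.shutdown();
  }
}
```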
2024-12-11T17:43:38,474 INFO [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(1482): Serving as 75744186b12a,46677,1733939017631, RpcServer on 75744186b12a/172.17.0.2:46677, sessionid=0x1001607750e0003 2024-12-11T17:43:38,474 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,45861,1733939017536-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,474 DEBUG [RS:2;75744186b12a:46677 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T17:43:38,474 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,474 DEBUG [RS:2;75744186b12a:46677 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 75744186b12a,46677,1733939017631 2024-12-11T17:43:38,474 INFO [RS:0;75744186b12a:45861 {}] regionserver.Replication(171): 75744186b12a,45861,1733939017536 started 2024-12-11T17:43:38,474 DEBUG [RS:2;75744186b12a:46677 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,46677,1733939017631' 2024-12-11T17:43:38,474 DEBUG [RS:2;75744186b12a:46677 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T17:43:38,475 DEBUG [RS:2;75744186b12a:46677 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T17:43:38,476 DEBUG [RS:2;75744186b12a:46677 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T17:43:38,476 DEBUG [RS:2;75744186b12a:46677 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T17:43:38,476 DEBUG [RS:2;75744186b12a:46677 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 75744186b12a,46677,1733939017631 2024-12-11T17:43:38,476 DEBUG [RS:2;75744186b12a:46677 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,46677,1733939017631' 2024-12-11T17:43:38,476 DEBUG [RS:2;75744186b12a:46677 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T17:43:38,476 DEBUG [RS:2;75744186b12a:46677 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T17:43:38,477 DEBUG [RS:2;75744186b12a:46677 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T17:43:38,477 INFO [RS:2;75744186b12a:46677 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T17:43:38,477 INFO [RS:2;75744186b12a:46677 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T17:43:38,478 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T17:43:38,478 INFO [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer(1482): Serving as 75744186b12a,43475,1733939017586, RpcServer on 75744186b12a/172.17.0.2:43475, sessionid=0x1001607750e0002 2024-12-11T17:43:38,478 DEBUG [RS:1;75744186b12a:43475 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T17:43:38,478 DEBUG [RS:1;75744186b12a:43475 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 75744186b12a,43475,1733939017586 2024-12-11T17:43:38,478 DEBUG [RS:1;75744186b12a:43475 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,43475,1733939017586' 2024-12-11T17:43:38,478 DEBUG [RS:1;75744186b12a:43475 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T17:43:38,479 DEBUG [RS:1;75744186b12a:43475 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T17:43:38,480 DEBUG [RS:1;75744186b12a:43475 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T17:43:38,480 DEBUG [RS:1;75744186b12a:43475 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T17:43:38,480 DEBUG [RS:1;75744186b12a:43475 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 75744186b12a,43475,1733939017586 2024-12-11T17:43:38,480 DEBUG [RS:1;75744186b12a:43475 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,43475,1733939017586' 2024-12-11T17:43:38,480 DEBUG [RS:1;75744186b12a:43475 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T17:43:38,481 DEBUG [RS:1;75744186b12a:43475 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T17:43:38,482 DEBUG [RS:1;75744186b12a:43475 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T17:43:38,482 INFO [RS:1;75744186b12a:43475 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T17:43:38,482 INFO [RS:1;75744186b12a:43475 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-11T17:43:38,496 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-11T17:43:38,496 INFO [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(1482): Serving as 75744186b12a,45861,1733939017536, RpcServer on 75744186b12a/172.17.0.2:45861, sessionid=0x1001607750e0001 2024-12-11T17:43:38,496 DEBUG [RS:0;75744186b12a:45861 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-11T17:43:38,497 DEBUG [RS:0;75744186b12a:45861 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 75744186b12a,45861,1733939017536 2024-12-11T17:43:38,497 DEBUG [RS:0;75744186b12a:45861 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,45861,1733939017536' 2024-12-11T17:43:38,497 DEBUG [RS:0;75744186b12a:45861 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-11T17:43:38,497 DEBUG [RS:0;75744186b12a:45861 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-11T17:43:38,499 DEBUG [RS:0;75744186b12a:45861 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-11T17:43:38,499 DEBUG [RS:0;75744186b12a:45861 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-11T17:43:38,499 DEBUG [RS:0;75744186b12a:45861 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 75744186b12a,45861,1733939017536 2024-12-11T17:43:38,499 DEBUG [RS:0;75744186b12a:45861 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '75744186b12a,45861,1733939017536' 2024-12-11T17:43:38,499 DEBUG [RS:0;75744186b12a:45861 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-11T17:43:38,500 DEBUG [RS:0;75744186b12a:45861 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-11T17:43:38,501 DEBUG [RS:0;75744186b12a:45861 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-11T17:43:38,501 INFO [RS:0;75744186b12a:45861 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-11T17:43:38,501 INFO [RS:0;75744186b12a:45861 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
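With the flush-table-proc and online-snapshot members started on all three servers, a client-initiated table flush is the kind of request they coordinate. A minimal sketch using the public Admin API; the table name below is hypothetical:

```java
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Flush is coordinated across region servers by the flush-table-proc members above.
      admin.flush(TableName.valueOf("my_table")); // table name is hypothetical
    }
  }
}
```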
2024-12-11T17:43:38,580 INFO [RS:2;75744186b12a:46677 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=75744186b12a%2C46677%2C1733939017631, suffix=, logDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/WALs/75744186b12a,46677,1733939017631, archiveDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/oldWALs, maxLogs=32 2024-12-11T17:43:38,582 INFO [RS:2;75744186b12a:46677 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 75744186b12a%2C46677%2C1733939017631.1733939018582 2024-12-11T17:43:38,585 INFO [RS:1;75744186b12a:43475 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=75744186b12a%2C43475%2C1733939017586, suffix=, logDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/WALs/75744186b12a,43475,1733939017586, archiveDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/oldWALs, maxLogs=32 2024-12-11T17:43:38,587 INFO [RS:1;75744186b12a:43475 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 75744186b12a%2C43475%2C1733939017586.1733939018587 2024-12-11T17:43:38,603 INFO [RS:2;75744186b12a:46677 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/WALs/75744186b12a,46677,1733939017631/75744186b12a%2C46677%2C1733939017631.1733939018582 2024-12-11T17:43:38,604 INFO [RS:0;75744186b12a:45861 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=75744186b12a%2C45861%2C1733939017536, suffix=, logDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/WALs/75744186b12a,45861,1733939017536, archiveDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/oldWALs, maxLogs=32 2024-12-11T17:43:38,606 INFO [RS:0;75744186b12a:45861 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 75744186b12a%2C45861%2C1733939017536.1733939018605 2024-12-11T17:43:38,608 INFO [RS:1;75744186b12a:43475 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/WALs/75744186b12a,43475,1733939017586/75744186b12a%2C43475%2C1733939017586.1733939018587 2024-12-11T17:43:38,631 DEBUG [RS:2;75744186b12a:46677 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36227:36227),(127.0.0.1/127.0.0.1:39915:39915),(127.0.0.1/127.0.0.1:33955:33955)] 2024-12-11T17:43:38,638 DEBUG [RS:1;75744186b12a:43475 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36227:36227),(127.0.0.1/127.0.0.1:39915:39915),(127.0.0.1/127.0.0.1:33955:33955)] 2024-12-11T17:43:38,644 INFO [RS:0;75744186b12a:45861 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/WALs/75744186b12a,45861,1733939017536/75744186b12a%2C45861%2C1733939017536.1733939018605 2024-12-11T17:43:38,650 DEBUG [75744186b12a:42897 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-12-11T17:43:38,651 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=75744186b12a,46677,1733939017631 2024-12-11T17:43:38,653 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 75744186b12a,46677,1733939017631, state=OPENING 2024-12-11T17:43:38,663 DEBUG [PEWorker-3 {}] 
zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-11T17:43:38,673 DEBUG [RS:0;75744186b12a:45861 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33955:33955),(127.0.0.1/127.0.0.1:36227:36227),(127.0.0.1/127.0.0.1:39915:39915)] 2024-12-11T17:43:38,679 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,680 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,680 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,681 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:38,681 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:38,681 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:38,683 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:38,684 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-11T17:43:38,684 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=75744186b12a,46677,1733939017631}] 2024-12-11T17:43:38,685 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:38,839 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T17:43:38,841 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49601, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T17:43:38,846 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-11T17:43:38,847 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-11T17:43:38,850 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=75744186b12a%2C46677%2C1733939017631.meta, suffix=.meta, 
logDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/WALs/75744186b12a,46677,1733939017631, archiveDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/oldWALs, maxLogs=32 2024-12-11T17:43:38,851 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 75744186b12a%2C46677%2C1733939017631.meta.1733939018851.meta 2024-12-11T17:43:38,862 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/WALs/75744186b12a,46677,1733939017631/75744186b12a%2C46677%2C1733939017631.meta.1733939018851.meta 2024-12-11T17:43:38,864 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36227:36227),(127.0.0.1/127.0.0.1:39915:39915),(127.0.0.1/127.0.0.1:33955:33955)] 2024-12-11T17:43:38,871 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-11T17:43:38,872 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-11T17:43:38,872 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-11T17:43:38,872 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
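The AbstractFSWAL lines show the WAL parameters in effect: FSHLogProvider, blocksize=256 MB, rollsize=128 MB (blocksize times 0.5), maxLogs=32. A hedged sketch of the configuration keys generally used to set them; the key names are assumptions if your release differs:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalTuning {
  public static Configuration walConf() {
    Configuration conf = HBaseConfiguration.create();
    // FSHLogProvider ("filesystem") is the WAL provider shown in the log.
    conf.set("hbase.wal.provider", "filesystem");
    // blocksize=256 MB; roll size is blocksize * multiplier (0.5 -> 128 MB).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // maxLogs=32: how many WAL files may accumulate before flushes are forced.
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}
```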
2024-12-11T17:43:38,873 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-11T17:43:38,873 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:38,873 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-11T17:43:38,873 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-11T17:43:38,876 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-11T17:43:38,878 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-11T17:43:38,879 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:38,879 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:38,880 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-11T17:43:38,884 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-11T17:43:38,884 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:38,885 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:38,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-11T17:43:38,887 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-11T17:43:38,887 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:38,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-11T17:43:38,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-11T17:43:38,890 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-11T17:43:38,891 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:38,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
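The store descriptors above (ROW_INDEX_V1 encoding, ROWCOL bloom filter, IN_MEMORY, 8 KB blocks, 3 versions), together with the MultiRowMutationEndpoint coprocessor loaded a few lines earlier, can be expressed with the public descriptor builders. This is a sketch only; the table name 'demo' is hypothetical and it does not recreate hbase:meta itself:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptor {
  public static TableDescriptor build() throws Exception {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo")) // hypothetical table
        // Same endpoint the log shows being loaded from the hbase:meta HTD.
        .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => '8192 B (8KB)'
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .build())
        .build();
  }
}
```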
2024-12-11T17:43:38,892 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-11T17:43:38,893 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740 2024-12-11T17:43:38,896 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740 2024-12-11T17:43:38,898 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-11T17:43:38,898 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-11T17:43:38,899 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-11T17:43:38,901 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-11T17:43:38,903 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69039846, jitterRate=0.028773874044418335}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-11T17:43:38,903 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-11T17:43:38,905 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733939018873Writing region info on filesystem at 1733939018873Initializing all the Stores at 1733939018875 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939018875Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939018876 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939018876Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733939018876Cleaning up temporary data from old regions at 1733939018898 (+22 ms)Running coprocessor post-open hooks at 1733939018903 (+5 ms)Region opened successfully at 1733939018904 (+1 ms) 2024-12-11T17:43:38,912 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733939018839 2024-12-11T17:43:38,916 DEBUG [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-11T17:43:38,916 INFO [RS_OPEN_META-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-11T17:43:38,918 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=75744186b12a,46677,1733939017631 2024-12-11T17:43:38,920 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 75744186b12a,46677,1733939017631, state=OPEN 2024-12-11T17:43:38,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T17:43:38,946 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T17:43:38,946 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T17:43:38,946 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-11T17:43:38,946 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=75744186b12a,46677,1733939017631 2024-12-11T17:43:38,946 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:38,946 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:38,946 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:38,947 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-11T17:43:38,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-11T17:43:38,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=75744186b12a,46677,1733939017631 in 262 msec 2024-12-11T17:43:38,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-11T17:43:38,959 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 708 msec 2024-12-11T17:43:38,961 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-11T17:43:38,961 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-11T17:43:38,963 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T17:43:38,963 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=75744186b12a,46677,1733939017631, seqNum=-1] 2024-12-11T17:43:38,963 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T17:43:38,965 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35081, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T17:43:38,976 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 797 msec 2024-12-11T17:43:38,976 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733939018976, completionTime=-1 2024-12-11T17:43:38,977 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-11T17:43:38,977 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
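The InitMetaProcedure above reports creating the 'default' and 'hbase' namespaces as part of master startup. A minimal sketch of confirming them from a client with the Admin API; the class name is illustrative, not part of this test, and the configuration is assumed to resolve this cluster's ZooKeeper quorum:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ListNamespacesSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // After InitMetaProcedure completes, 'default' and 'hbase' should both be listed.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println(ns.getName());
      }
    }
  }
}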
2024-12-11T17:43:38,979 INFO [master/75744186b12a:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-11T17:43:38,980 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733939078980 2024-12-11T17:43:38,980 INFO [master/75744186b12a:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733939138980 2024-12-11T17:43:38,980 INFO [master/75744186b12a:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 3 msec 2024-12-11T17:43:38,980 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,42897,1733939017363-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,981 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,42897,1733939017363-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,981 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,42897,1733939017363-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,981 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-75744186b12a:42897, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,981 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,981 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:38,984 DEBUG [master/75744186b12a:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-11T17:43:38,988 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.216sec 2024-12-11T17:43:38,988 INFO [master/75744186b12a:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-11T17:43:38,988 INFO [master/75744186b12a:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-11T17:43:38,989 INFO [master/75744186b12a:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-11T17:43:38,989 INFO [master/75744186b12a:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-12-11T17:43:38,989 INFO [master/75744186b12a:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-11T17:43:38,989 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,42897,1733939017363-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-12-11T17:43:38,989 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,42897,1733939017363-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-11T17:43:38,992 DEBUG [master/75744186b12a:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-11T17:43:38,992 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-11T17:43:38,992 INFO [master/75744186b12a:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=75744186b12a,42897,1733939017363-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-11T17:43:39,073 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@409b304d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T17:43:39,074 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 75744186b12a,42897,-1 for getting cluster id 2024-12-11T17:43:39,074 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-11T17:43:39,076 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'df468425-a391-440b-9948-a3e4ba242675' 2024-12-11T17:43:39,076 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-11T17:43:39,076 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "df468425-a391-440b-9948-a3e4ba242675" 2024-12-11T17:43:39,077 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@772172b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T17:43:39,077 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [75744186b12a,42897,-1] 2024-12-11T17:43:39,077 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-11T17:43:39,078 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:39,080 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40908, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-11T17:43:39,081 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@57b0a79a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-11T17:43:39,082 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-11T17:43:39,084 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is 
[region=hbase:meta,,1.1588230740, hostname=75744186b12a,46677,1733939017631, seqNum=-1] 2024-12-11T17:43:39,084 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T17:43:39,087 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59790, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T17:43:39,092 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=75744186b12a,42897,1733939017363 2024-12-11T17:43:39,094 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-11T17:43:39,096 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 75744186b12a,42897,1733939017363 2024-12-11T17:43:39,096 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6d6ab13a 2024-12-11T17:43:39,096 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-11T17:43:39,098 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40910, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-11T17:43:39,104 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-11T17:43:39,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-11T17:43:39,109 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-11T17:43:39,109 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:39,109 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-11T17:43:39,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T17:43:39,114 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-11T17:43:39,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741837_1013 (size=392) 
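The HMaster entries above show the client's create 'TestHBaseWalOnEC' request (a single 'cf' family, REGION_REPLICATION => '1') being stored as CreateTableProcedure pid=4. A minimal sketch of issuing an equivalent create through the Java Admin API; this is an illustration rather than the test's actual code, and the configuration is assumed to point at this cluster:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Single column family 'cf' with default attributes, matching the schema in the request above.
      TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // Returns once the master's CreateTableProcedure (pid=4 in this log) has finished.
      admin.createTable(td);
    }
  }
}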
2024-12-11T17:43:39,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741837_1013 (size=392) 2024-12-11T17:43:39,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741837_1013 (size=392) 2024-12-11T17:43:39,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T17:43:39,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T17:43:39,559 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 799cfdaf9cb42a437181e24950668ac1, NAME => 'TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762 2024-12-11T17:43:39,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741838_1014 (size=51) 2024-12-11T17:43:39,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741838_1014 (size=51) 2024-12-11T17:43:39,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741838_1014 (size=51) 2024-12-11T17:43:39,623 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:39,623 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 799cfdaf9cb42a437181e24950668ac1, disabling compactions & flushes 2024-12-11T17:43:39,623 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1. 2024-12-11T17:43:39,623 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1. 2024-12-11T17:43:39,623 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1. after waiting 0 ms 2024-12-11T17:43:39,623 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1. 2024-12-11T17:43:39,623 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1. 
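The 'Block report processor' entries above show each newly written file (here blk_1073741837_1013 and blk_1073741838_1014) acknowledged by three DataNodes. Since the test's name points at WALs on erasure-coded HDFS, one way to check whether a given directory actually carries an EC policy is DistributedFileSystem.getErasureCodingPolicy. A sketch under the assumption that the NameNode address from this log is reachable; the path is illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheckSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:40023"); // NameNode from this log; substitute your own
    try (FileSystem fs = FileSystem.get(conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path dir = new Path("/user/jenkins/test-data"); // illustrative path, not copied from the test
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      System.out.println(policy == null ? "replicated (no EC policy set)" : policy.getName());
    }
  }
}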
2024-12-11T17:43:39,624 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 799cfdaf9cb42a437181e24950668ac1: Waiting for close lock at 1733939019623Disabling compacts and flushes for region at 1733939019623Disabling writes for close at 1733939019623Writing region close event to WAL at 1733939019623Closed at 1733939019623 2024-12-11T17:43:39,636 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-11T17:43:39,636 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733939019636"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733939019636"}]},"ts":"1733939019636"} 2024-12-11T17:43:39,641 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-11T17:43:39,644 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-11T17:43:39,645 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733939019644"}]},"ts":"1733939019644"} 2024-12-11T17:43:39,650 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-11T17:43:39,651 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {75744186b12a=0} racks are {/default-rack=0} 2024-12-11T17:43:39,652 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-11T17:43:39,652 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-11T17:43:39,652 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-11T17:43:39,652 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-11T17:43:39,653 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-11T17:43:39,653 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-11T17:43:39,653 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-11T17:43:39,653 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-11T17:43:39,653 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-11T17:43:39,653 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-11T17:43:39,653 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=799cfdaf9cb42a437181e24950668ac1, ASSIGN}] 2024-12-11T17:43:39,659 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=799cfdaf9cb42a437181e24950668ac1, ASSIGN 2024-12-11T17:43:39,662 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=799cfdaf9cb42a437181e24950668ac1, ASSIGN; state=OFFLINE, location=75744186b12a,45861,1733939017536; forceNewPlan=false, retain=false 2024-12-11T17:43:39,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T17:43:39,812 INFO [75744186b12a:42897 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-11T17:43:39,813 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=799cfdaf9cb42a437181e24950668ac1, regionState=OPENING, regionLocation=75744186b12a,45861,1733939017536 2024-12-11T17:43:39,817 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=799cfdaf9cb42a437181e24950668ac1, ASSIGN because future has completed 2024-12-11T17:43:39,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 799cfdaf9cb42a437181e24950668ac1, server=75744186b12a,45861,1733939017536}] 2024-12-11T17:43:39,972 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-11T17:43:39,975 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58149, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-11T17:43:39,986 INFO [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1. 
2024-12-11T17:43:39,986 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 799cfdaf9cb42a437181e24950668ac1, NAME => 'TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1.', STARTKEY => '', ENDKEY => ''} 2024-12-11T17:43:39,987 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 799cfdaf9cb42a437181e24950668ac1 2024-12-11T17:43:39,987 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-11T17:43:39,987 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 799cfdaf9cb42a437181e24950668ac1 2024-12-11T17:43:39,987 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 799cfdaf9cb42a437181e24950668ac1 2024-12-11T17:43:39,991 INFO [StoreOpener-799cfdaf9cb42a437181e24950668ac1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 799cfdaf9cb42a437181e24950668ac1 2024-12-11T17:43:39,995 INFO [StoreOpener-799cfdaf9cb42a437181e24950668ac1-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 799cfdaf9cb42a437181e24950668ac1 columnFamilyName cf 2024-12-11T17:43:39,995 DEBUG [StoreOpener-799cfdaf9cb42a437181e24950668ac1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-11T17:43:39,996 INFO [StoreOpener-799cfdaf9cb42a437181e24950668ac1-1 {}] regionserver.HStore(327): Store=799cfdaf9cb42a437181e24950668ac1/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-11T17:43:39,997 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 799cfdaf9cb42a437181e24950668ac1 2024-12-11T17:43:39,998 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/default/TestHBaseWalOnEC/799cfdaf9cb42a437181e24950668ac1 2024-12-11T17:43:39,999 DEBUG 
[RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/default/TestHBaseWalOnEC/799cfdaf9cb42a437181e24950668ac1 2024-12-11T17:43:40,000 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 799cfdaf9cb42a437181e24950668ac1 2024-12-11T17:43:40,000 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 799cfdaf9cb42a437181e24950668ac1 2024-12-11T17:43:40,002 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 799cfdaf9cb42a437181e24950668ac1 2024-12-11T17:43:40,009 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/default/TestHBaseWalOnEC/799cfdaf9cb42a437181e24950668ac1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-11T17:43:40,009 INFO [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 799cfdaf9cb42a437181e24950668ac1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69226426, jitterRate=0.03155413269996643}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-11T17:43:40,010 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 799cfdaf9cb42a437181e24950668ac1 2024-12-11T17:43:40,011 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 799cfdaf9cb42a437181e24950668ac1: Running coprocessor pre-open hook at 1733939019988Writing region info on filesystem at 1733939019988Initializing all the Stores at 1733939019989 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733939019989Cleaning up temporary data from old regions at 1733939020000 (+11 ms)Running coprocessor post-open hooks at 1733939020010 (+10 ms)Region opened successfully at 1733939020011 (+1 ms) 2024-12-11T17:43:40,013 INFO [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1., pid=6, masterSystemTime=1733939019972 2024-12-11T17:43:40,017 DEBUG [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1. 2024-12-11T17:43:40,017 INFO [RS_OPEN_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1. 
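With OpenRegionProcedure pid=6 finished, the TestHBaseWalOnEC region is live on 75744186b12a,45861 and can be resolved by clients (the AsyncNonMetaRegionLocator entry further below does exactly that for row 'row'). A minimal sketch of the same lookup through the synchronous RegionLocator API, for illustration only:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class LocateRegionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
      // For a single-region table, any row key resolves to the same region.
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row"));
      System.out.println(loc.getServerName() + " hosts " + loc.getRegion().getRegionNameAsString());
    }
  }
}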
2024-12-11T17:43:40,022 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=799cfdaf9cb42a437181e24950668ac1, regionState=OPEN, openSeqNum=2, regionLocation=75744186b12a,45861,1733939017536 2024-12-11T17:43:40,034 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42897 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=75744186b12a,45861,1733939017536, table=TestHBaseWalOnEC, region=799cfdaf9cb42a437181e24950668ac1. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-11T17:43:40,036 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 799cfdaf9cb42a437181e24950668ac1, server=75744186b12a,45861,1733939017536 because future has completed 2024-12-11T17:43:40,046 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-11T17:43:40,047 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 799cfdaf9cb42a437181e24950668ac1, server=75744186b12a,45861,1733939017536 in 221 msec 2024-12-11T17:43:40,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-11T17:43:40,051 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=799cfdaf9cb42a437181e24950668ac1, ASSIGN in 392 msec 2024-12-11T17:43:40,053 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-11T17:43:40,054 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733939020053"}]},"ts":"1733939020053"} 2024-12-11T17:43:40,058 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-11T17:43:40,060 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-11T17:43:40,064 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 956 msec 2024-12-11T17:43:40,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-11T17:43:40,252 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T17:43:40,252 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-11T17:43:40,253 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T17:43:40,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 
2024-12-11T17:43:40,256 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-11T17:43:40,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-11T17:43:40,260 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1., hostname=75744186b12a,45861,1733939017536, seqNum=2] 2024-12-11T17:43:40,260 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-11T17:43:40,263 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35140, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-11T17:43:40,267 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-11T17:43:40,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-11T17:43:40,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T17:43:40,271 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-11T17:43:40,273 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-11T17:43:40,273 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-11T17:43:40,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T17:43:40,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=45861 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-11T17:43:40,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1. 
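The flush request above (FlushTableProcedure pid=7) and the HFile key 'row/cf:cq/...' in the entries that follow correspond to a single small put followed by an admin-triggered flush. A hedged client-side equivalent; the cell value is an assumption, since only the row, family, and qualifier are visible in the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      // One small cell; the flush entries below report "dataSize=32 B" for this memstore.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value"))); // value is illustrative
      // Triggers the FlushTableProcedure / FlushRegionProcedure chain recorded in this log.
      admin.flush(tn);
    }
  }
}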
2024-12-11T17:43:40,438 INFO [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 799cfdaf9cb42a437181e24950668ac1 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-11T17:43:40,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/default/TestHBaseWalOnEC/799cfdaf9cb42a437181e24950668ac1/.tmp/cf/ffd26b638bba41199a870ff66fd6d141 is 36, key is row/cf:cq/1733939020264/Put/seqid=0 2024-12-11T17:43:40,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741839_1015 (size=4787) 2024-12-11T17:43:40,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741839_1015 (size=4787) 2024-12-11T17:43:40,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741839_1015 (size=4787) 2024-12-11T17:43:40,544 INFO [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/default/TestHBaseWalOnEC/799cfdaf9cb42a437181e24950668ac1/.tmp/cf/ffd26b638bba41199a870ff66fd6d141 2024-12-11T17:43:40,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/default/TestHBaseWalOnEC/799cfdaf9cb42a437181e24950668ac1/.tmp/cf/ffd26b638bba41199a870ff66fd6d141 as hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/default/TestHBaseWalOnEC/799cfdaf9cb42a437181e24950668ac1/cf/ffd26b638bba41199a870ff66fd6d141 2024-12-11T17:43:40,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T17:43:40,595 INFO [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/default/TestHBaseWalOnEC/799cfdaf9cb42a437181e24950668ac1/cf/ffd26b638bba41199a870ff66fd6d141, entries=1, sequenceid=5, filesize=4.7 K 2024-12-11T17:43:40,597 INFO [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 799cfdaf9cb42a437181e24950668ac1 in 159ms, sequenceid=5, compaction requested=false 2024-12-11T17:43:40,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 799cfdaf9cb42a437181e24950668ac1: 2024-12-11T17:43:40,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1. 
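After the flush, the only cell lives in the new store file ffd26b638bba41199a870ff66fd6d141 (entries=1, sequenceid=5, ~4.7 K) rather than in the memstore, and a read should still return it. A short verification sketch, again illustrative rather than taken from the test source:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAfterFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestHBaseWalOnEC"))) {
      Result result = table.get(new Get(Bytes.toBytes("row")));
      // The value written before the flush is now served from the flushed HFile.
      System.out.println(Bytes.toString(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("cq"))));
    }
  }
}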
2024-12-11T17:43:40,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/75744186b12a:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-11T17:43:40,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-11T17:43:40,610 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-11T17:43:40,610 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 328 msec 2024-12-11T17:43:40,618 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 345 msec 2024-12-11T17:43:40,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42897 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-11T17:43:40,912 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-11T17:43:40,917 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-11T17:43:40,917 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-11T17:43:40,917 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T17:43:40,917 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:40,918 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:40,918 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-11T17:43:40,918 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1868698441, stopped=false 2024-12-11T17:43:40,918 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=75744186b12a,42897,1733939017363 2024-12-11T17:43:40,918 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
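The call stack above shows the teardown path: TestHBaseWalOnEC.tearDown -> HBaseTestingUtil.shutdownMiniCluster -> shutdownMiniHBaseCluster, which closes the shared connection and asks the master to stop (the "Cluster shutdown requested" entry). A hedged JUnit-style sketch of that lifecycle; the class layout, field name, and annotations are assumptions rather than the test's actual structure, and the cluster size mirrors the "expected min=3 server(s)" seen earlier in this log:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;
import org.junit.BeforeClass;

public class MiniClusterLifecycleSketch {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @BeforeClass
  public static void setUp() throws Exception {
    // Starts a master plus three region servers, as this log's minicluster did.
    UTIL.startMiniCluster(3);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Produces the "Shutting down minicluster" / "Cluster shutdown requested" sequence above.
    UTIL.shutdownMiniCluster();
  }
}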
2024-12-11T17:43:40,977 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:40,977 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:40,977 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:40,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-11T17:43:40,977 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:40,977 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:40,978 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:40,978 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-11T17:43:40,978 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-11T17:43:40,978 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-11T17:43:40,978 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:40,978 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:40,979 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:40,978 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-11T17:43:40,978 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-11T17:43:40,979 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-11T17:43:40,979 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '75744186b12a,45861,1733939017536' ***** 2024-12-11T17:43:40,979 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T17:43:40,979 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '75744186b12a,43475,1733939017586' ***** 2024-12-11T17:43:40,979 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T17:43:40,979 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '75744186b12a,46677,1733939017631' ***** 2024-12-11T17:43:40,979 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-11T17:43:40,980 INFO [RS:1;75744186b12a:43475 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T17:43:40,980 INFO [RS:1;75744186b12a:43475 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T17:43:40,980 INFO [RS:2;75744186b12a:46677 {}] regionserver.HeapMemoryManager(224): Stopping 2024-12-11T17:43:40,980 INFO [RS:1;75744186b12a:43475 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-11T17:43:40,980 INFO [RS:2;75744186b12a:46677 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-11T17:43:40,980 INFO [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer(959): stopping server 75744186b12a,43475,1733939017586 2024-12-11T17:43:40,980 INFO [RS:2;75744186b12a:46677 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-11T17:43:40,980 INFO [RS:1;75744186b12a:43475 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-11T17:43:40,980 INFO [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(959): stopping server 75744186b12a,46677,1733939017631
2024-12-11T17:43:40,980 INFO [RS:0;75744186b12a:45861 {}] regionserver.HeapMemoryManager(224): Stopping
2024-12-11T17:43:40,980 INFO [RS:2;75744186b12a:46677 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-11T17:43:40,980 INFO [RS:1;75744186b12a:43475 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;75744186b12a:43475.
2024-12-11T17:43:40,980 INFO [RS:0;75744186b12a:45861 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully.
2024-12-11T17:43:40,980 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-11T17:43:40,980 INFO [RS:2;75744186b12a:46677 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;75744186b12a:46677.
2024-12-11T17:43:40,980 INFO [RS:0;75744186b12a:45861 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully.
2024-12-11T17:43:40,980 DEBUG [RS:1;75744186b12a:43475 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-11T17:43:40,980 INFO [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(3091): Received CLOSE for 799cfdaf9cb42a437181e24950668ac1
2024-12-11T17:43:40,980 DEBUG [RS:1;75744186b12a:43475 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T17:43:40,980 DEBUG [RS:2;75744186b12a:46677 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-11T17:43:40,980 DEBUG [RS:2;75744186b12a:46677 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T17:43:40,980 INFO [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer(976): stopping server 75744186b12a,43475,1733939017586; all regions closed.
2024-12-11T17:43:40,981 INFO [RS:2;75744186b12a:46677 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-11T17:43:40,981 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-11T17:43:40,981 INFO [RS:2;75744186b12a:46677 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-11T17:43:40,981 INFO [RS:2;75744186b12a:46677 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-11T17:43:40,981 INFO [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(959): stopping server 75744186b12a,45861,1733939017536
2024-12-11T17:43:40,981 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting
2024-12-11T17:43:40,981 INFO [RS:0;75744186b12a:45861 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-11T17:43:40,981 INFO [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-12-11T17:43:40,981 INFO [RS:0;75744186b12a:45861 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;75744186b12a:45861.
2024-12-11T17:43:40,981 DEBUG [RS:0;75744186b12a:45861 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-12-11T17:43:40,981 DEBUG [RS:0;75744186b12a:45861 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T17:43:40,981 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 799cfdaf9cb42a437181e24950668ac1, disabling compactions & flushes
2024-12-11T17:43:40,981 INFO [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1.
2024-12-11T17:43:40,982 INFO [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-11T17:43:40,982 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1.
2024-12-11T17:43:40,982 DEBUG [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(1325): Online Regions={799cfdaf9cb42a437181e24950668ac1=TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1.}
2024-12-11T17:43:40,982 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1. after waiting 0 ms
2024-12-11T17:43:40,982 DEBUG [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(1351): Waiting on 799cfdaf9cb42a437181e24950668ac1
2024-12-11T17:43:40,982 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1.
2024-12-11T17:43:40,982 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:40,982 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:40,983 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:40,983 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:40,983 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:40,983 INFO [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-12-11T17:43:40,984 DEBUG [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-12-11T17:43:40,984 DEBUG [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-12-11T17:43:40,984 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-12-11T17:43:40,984 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-12-11T17:43:40,984 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-12-11T17:43:40,984 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-12-11T17:43:40,984 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-12-11T17:43:40,984 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB
2024-12-11T17:43:40,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741834_1010 (size=93)
2024-12-11T17:43:40,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741834_1010 (size=93)
2024-12-11T17:43:40,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741834_1010 (size=93)
2024-12-11T17:43:40,992 DEBUG [RS:1;75744186b12a:43475 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/oldWALs
2024-12-11T17:43:40,992 INFO [RS:1;75744186b12a:43475 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 75744186b12a%2C43475%2C1733939017586:(num 1733939018587)
2024-12-11T17:43:40,992 DEBUG [RS:1;75744186b12a:43475 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T17:43:40,992 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/default/TestHBaseWalOnEC/799cfdaf9cb42a437181e24950668ac1/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-11T17:43:40,992 INFO [RS:1;75744186b12a:43475 {}] regionserver.LeaseManager(133): Closed leases
2024-12-11T17:43:40,993 INFO [RS:1;75744186b12a:43475 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-11T17:43:40,993 INFO [RS:1;75744186b12a:43475 {}] hbase.ChoreService(370): Chore service for: regionserver/75744186b12a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-11T17:43:40,993 INFO [RS:1;75744186b12a:43475 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-11T17:43:40,993 INFO [regionserver/75744186b12a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-11T17:43:40,993 INFO [RS:1;75744186b12a:43475 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-11T17:43:40,993 INFO [RS:1;75744186b12a:43475 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-11T17:43:40,993 INFO [RS:1;75744186b12a:43475 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-11T17:43:40,993 INFO [RS:1;75744186b12a:43475 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43475
2024-12-11T17:43:40,993 INFO [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1.
2024-12-11T17:43:40,994 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 799cfdaf9cb42a437181e24950668ac1: Waiting for close lock at 1733939020981Running coprocessor pre-close hooks at 1733939020981Disabling compacts and flushes for region at 1733939020981Disabling writes for close at 1733939020982 (+1 ms)Writing region close event to WAL at 1733939020984 (+2 ms)Running coprocessor post-close hooks at 1733939020993 (+9 ms)Closed at 1733939020993
2024-12-11T17:43:40,994 DEBUG [RS_CLOSE_REGION-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1.
2024-12-11T17:43:41,004 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/75744186b12a,43475,1733939017586
2024-12-11T17:43:41,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-11T17:43:41,004 INFO [RS:1;75744186b12a:43475 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-11T17:43:41,006 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/.tmp/info/7fdec82dcaac46198f7b58795ed16f82 is 153, key is TestHBaseWalOnEC,,1733939019103.799cfdaf9cb42a437181e24950668ac1./info:regioninfo/1733939020022/Put/seqid=0
2024-12-11T17:43:41,012 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [75744186b12a,43475,1733939017586]
2024-12-11T17:43:41,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741840_1016 (size=6637)
2024-12-11T17:43:41,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741840_1016 (size=6637)
2024-12-11T17:43:41,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741840_1016 (size=6637)
2024-12-11T17:43:41,020 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/75744186b12a,43475,1733939017586 already deleted, retry=false
2024-12-11T17:43:41,020 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 75744186b12a,43475,1733939017586 expired; onlineServers=2
2024-12-11T17:43:41,021 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/.tmp/info/7fdec82dcaac46198f7b58795ed16f82
2024-12-11T17:43:41,041 INFO [regionserver/75744186b12a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-11T17:43:41,041 INFO [regionserver/75744186b12a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-11T17:43:41,050 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/.tmp/ns/41a7bbb3bb43466a93cdc2ce4c7f3457 is 43, key is default/ns:d/1733939018966/Put/seqid=0
2024-12-11T17:43:41,053 INFO [regionserver/75744186b12a:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-12-11T17:43:41,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741841_1017 (size=5153)
2024-12-11T17:43:41,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741841_1017 (size=5153)
2024-12-11T17:43:41,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741841_1017 (size=5153)
2024-12-11T17:43:41,058 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/.tmp/ns/41a7bbb3bb43466a93cdc2ce4c7f3457
2024-12-11T17:43:41,082 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/.tmp/table/5fa4258986004f72ad66be1a69ad60b9 is 52, key is TestHBaseWalOnEC/table:state/1733939020053/Put/seqid=0
2024-12-11T17:43:41,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741842_1018 (size=5249)
2024-12-11T17:43:41,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741842_1018 (size=5249)
2024-12-11T17:43:41,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741842_1018 (size=5249)
2024-12-11T17:43:41,094 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/.tmp/table/5fa4258986004f72ad66be1a69ad60b9
2024-12-11T17:43:41,102 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/.tmp/info/7fdec82dcaac46198f7b58795ed16f82 as hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/info/7fdec82dcaac46198f7b58795ed16f82
2024-12-11T17:43:41,110 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/info/7fdec82dcaac46198f7b58795ed16f82, entries=10, sequenceid=11, filesize=6.5 K
2024-12-11T17:43:41,112 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/.tmp/ns/41a7bbb3bb43466a93cdc2ce4c7f3457 as hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/ns/41a7bbb3bb43466a93cdc2ce4c7f3457
2024-12-11T17:43:41,112 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-11T17:43:41,112 INFO [RS:1;75744186b12a:43475 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-11T17:43:41,112 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43475-0x1001607750e0002, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-11T17:43:41,112 INFO [RS:1;75744186b12a:43475 {}] regionserver.HRegionServer(1031): Exiting; stopping=75744186b12a,43475,1733939017586; zookeeper connection closed.
2024-12-11T17:43:41,113 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7a14f076 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7a14f076
2024-12-11T17:43:41,120 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/ns/41a7bbb3bb43466a93cdc2ce4c7f3457, entries=2, sequenceid=11, filesize=5.0 K
2024-12-11T17:43:41,121 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/.tmp/table/5fa4258986004f72ad66be1a69ad60b9 as hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/table/5fa4258986004f72ad66be1a69ad60b9
2024-12-11T17:43:41,131 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/table/5fa4258986004f72ad66be1a69ad60b9, entries=2, sequenceid=11, filesize=5.1 K
2024-12-11T17:43:41,132 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 148ms, sequenceid=11, compaction requested=false
2024-12-11T17:43:41,139 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1
2024-12-11T17:43:41,140 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-12-11T17:43:41,140 INFO [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-12-11T17:43:41,140 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733939020984Running coprocessor pre-close hooks at 1733939020984Disabling compacts and flushes for region at 1733939020984Disabling writes for close at 1733939020984Obtaining lock to block concurrent updates at 1733939020984Preparing flush snapshotting stores in 1588230740 at 1733939020984Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733939020985 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733939020986 (+1 ms)Flushing 1588230740/info: creating writer at 1733939020986Flushing 1588230740/info: appending metadata at 1733939021006 (+20 ms)Flushing 1588230740/info: closing flushed file at 1733939021006Flushing 1588230740/ns: creating writer at 1733939021029 (+23 ms)Flushing 1588230740/ns: appending metadata at 1733939021049 (+20 ms)Flushing 1588230740/ns: closing flushed file at 1733939021049Flushing 1588230740/table: creating writer at 1733939021066 (+17 ms)Flushing 1588230740/table: appending metadata at 1733939021082 (+16 ms)Flushing 1588230740/table: closing flushed file at 1733939021082Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27d337cc: reopening flushed file at 1733939021101 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59972e4d: reopening flushed file at 1733939021111 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@584518db: reopening flushed file at 1733939021120 (+9 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 148ms, sequenceid=11, compaction requested=false at 1733939021133 (+13 ms)Writing region close event to WAL at 1733939021134 (+1 ms)Running coprocessor post-close hooks at 1733939021140 (+6 ms)Closed at 1733939021140
2024-12-11T17:43:41,140 DEBUG [RS_CLOSE_META-regionserver/75744186b12a:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-12-11T17:43:41,182 INFO [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(976): stopping server 75744186b12a,45861,1733939017536; all regions closed.
2024-12-11T17:43:41,183 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,183 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,183 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,183 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,183 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,184 INFO [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(976): stopping server 75744186b12a,46677,1733939017631; all regions closed.
2024-12-11T17:43:41,184 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,184 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,185 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,185 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,185 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741835_1011 (size=1298)
2024-12-11T17:43:41,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741835_1011 (size=1298)
2024-12-11T17:43:41,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741836_1012 (size=2751)
2024-12-11T17:43:41,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741836_1012 (size=2751)
2024-12-11T17:43:41,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741835_1011 (size=1298)
2024-12-11T17:43:41,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741836_1012 (size=2751)
2024-12-11T17:43:41,192 DEBUG [RS:2;75744186b12a:46677 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/oldWALs
2024-12-11T17:43:41,192 INFO [RS:2;75744186b12a:46677 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 75744186b12a%2C46677%2C1733939017631.meta:.meta(num 1733939018851)
2024-12-11T17:43:41,192 DEBUG [RS:0;75744186b12a:45861 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/oldWALs
2024-12-11T17:43:41,192 INFO [RS:0;75744186b12a:45861 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 75744186b12a%2C45861%2C1733939017536:(num 1733939018605)
2024-12-11T17:43:41,192 DEBUG [RS:0;75744186b12a:45861 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T17:43:41,192 INFO [RS:0;75744186b12a:45861 {}] regionserver.LeaseManager(133): Closed leases
2024-12-11T17:43:41,192 INFO [RS:0;75744186b12a:45861 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-11T17:43:41,192 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,192 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,192 INFO [RS:0;75744186b12a:45861 {}] hbase.ChoreService(370): Chore service for: regionserver/75744186b12a:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-12-11T17:43:41,192 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,193 INFO [RS:0;75744186b12a:45861 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-12-11T17:43:41,193 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,193 INFO [RS:0;75744186b12a:45861 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-12-11T17:43:41,193 INFO [regionserver/75744186b12a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-11T17:43:41,193 INFO [RS:0;75744186b12a:45861 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-12-11T17:43:41,193 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,193 INFO [RS:0;75744186b12a:45861 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-11T17:43:41,193 INFO [RS:0;75744186b12a:45861 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45861
2024-12-11T17:43:41,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741833_1009 (size=93)
2024-12-11T17:43:41,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741833_1009 (size=93)
2024-12-11T17:43:41,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741833_1009 (size=93)
2024-12-11T17:43:41,199 DEBUG [RS:2;75744186b12a:46677 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/oldWALs
2024-12-11T17:43:41,199 INFO [RS:2;75744186b12a:46677 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 75744186b12a%2C46677%2C1733939017631:(num 1733939018582)
2024-12-11T17:43:41,199 DEBUG [RS:2;75744186b12a:46677 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-11T17:43:41,199 INFO [RS:2;75744186b12a:46677 {}] regionserver.LeaseManager(133): Closed leases
2024-12-11T17:43:41,199 INFO [RS:2;75744186b12a:46677 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-11T17:43:41,199 INFO [RS:2;75744186b12a:46677 {}] hbase.ChoreService(370): Chore service for: regionserver/75744186b12a:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-12-11T17:43:41,199 INFO [RS:2;75744186b12a:46677 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-11T17:43:41,199 INFO [regionserver/75744186b12a:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-11T17:43:41,200 INFO [RS:2;75744186b12a:46677 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46677
2024-12-11T17:43:41,211 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/75744186b12a,45861,1733939017536
2024-12-11T17:43:41,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-12-11T17:43:41,211 INFO [RS:0;75744186b12a:45861 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-11T17:43:41,220 INFO [RS:2;75744186b12a:46677 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-11T17:43:41,220 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/75744186b12a,46677,1733939017631
2024-12-11T17:43:41,220 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [75744186b12a,45861,1733939017536]
2024-12-11T17:43:41,237 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/75744186b12a,45861,1733939017536 already deleted, retry=false
2024-12-11T17:43:41,237 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 75744186b12a,45861,1733939017536 expired; onlineServers=1
2024-12-11T17:43:41,237 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [75744186b12a,46677,1733939017631]
2024-12-11T17:43:41,245 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/75744186b12a,46677,1733939017631 already deleted, retry=false
2024-12-11T17:43:41,245 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 75744186b12a,46677,1733939017631 expired; onlineServers=0
2024-12-11T17:43:41,245 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '75744186b12a,42897,1733939017363' *****
2024-12-11T17:43:41,245 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-12-11T17:43:41,246 INFO [M:0;75744186b12a:42897 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-12-11T17:43:41,246 INFO [M:0;75744186b12a:42897 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-12-11T17:43:41,246 DEBUG [M:0;75744186b12a:42897 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-12-11T17:43:41,246 DEBUG [M:0;75744186b12a:42897 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-12-11T17:43:41,246 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-12-11T17:43:41,246 DEBUG [master/75744186b12a:0:becomeActiveMaster-HFileCleaner.small.0-1733939018186 {}] cleaner.HFileCleaner(306): Exit Thread[master/75744186b12a:0:becomeActiveMaster-HFileCleaner.small.0-1733939018186,5,FailOnTimeoutGroup]
2024-12-11T17:43:41,246 DEBUG [master/75744186b12a:0:becomeActiveMaster-HFileCleaner.large.0-1733939018186 {}] cleaner.HFileCleaner(306): Exit Thread[master/75744186b12a:0:becomeActiveMaster-HFileCleaner.large.0-1733939018186,5,FailOnTimeoutGroup]
2024-12-11T17:43:41,246 INFO [M:0;75744186b12a:42897 {}] hbase.ChoreService(370): Chore service for: master/75744186b12a:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-12-11T17:43:41,246 INFO [M:0;75744186b12a:42897 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-12-11T17:43:41,246 DEBUG [M:0;75744186b12a:42897 {}] master.HMaster(1795): Stopping service threads
2024-12-11T17:43:41,247 INFO [M:0;75744186b12a:42897 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-12-11T17:43:41,247 INFO [M:0;75744186b12a:42897 {}] procedure2.ProcedureExecutor(723): Stopping
2024-12-11T17:43:41,247 INFO [M:0;75744186b12a:42897 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-12-11T17:43:41,247 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-12-11T17:43:41,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-12-11T17:43:41,254 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-12-11T17:43:41,254 DEBUG [M:0;75744186b12a:42897 {}] zookeeper.ZKUtil(347): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-12-11T17:43:41,254 WARN [M:0;75744186b12a:42897 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-12-11T17:43:41,256 INFO [M:0;75744186b12a:42897 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/.lastflushedseqids
2024-12-11T17:43:41,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741843_1019 (size=127)
2024-12-11T17:43:41,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741843_1019 (size=127)
2024-12-11T17:43:41,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741843_1019 (size=127)
2024-12-11T17:43:41,267 INFO [M:0;75744186b12a:42897 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-12-11T17:43:41,268 INFO [M:0;75744186b12a:42897 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-12-11T17:43:41,268 DEBUG [M:0;75744186b12a:42897 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-12-11T17:43:41,268 INFO [M:0;75744186b12a:42897 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-11T17:43:41,268 DEBUG [M:0;75744186b12a:42897 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-11T17:43:41,268 DEBUG [M:0;75744186b12a:42897 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-12-11T17:43:41,268 DEBUG [M:0;75744186b12a:42897 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-11T17:43:41,268 INFO [M:0;75744186b12a:42897 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB
2024-12-11T17:43:41,290 DEBUG [M:0;75744186b12a:42897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b6dc3b47b6bf459298c5203824f016dd is 82, key is hbase:meta,,1/info:regioninfo/1733939018918/Put/seqid=0
2024-12-11T17:43:41,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741844_1020 (size=5672)
2024-12-11T17:43:41,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741844_1020 (size=5672)
2024-12-11T17:43:41,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741844_1020 (size=5672)
2024-12-11T17:43:41,300 INFO [M:0;75744186b12a:42897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b6dc3b47b6bf459298c5203824f016dd
2024-12-11T17:43:41,329 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-11T17:43:41,329 INFO [RS:0;75744186b12a:45861 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-11T17:43:41,329 INFO [RS:0;75744186b12a:45861 {}] regionserver.HRegionServer(1031): Exiting; stopping=75744186b12a,45861,1733939017536; zookeeper connection closed.
2024-12-11T17:43:41,329 DEBUG [pool-324-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45861-0x1001607750e0001, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-11T17:43:41,329 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5cf2e690 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5cf2e690
2024-12-11T17:43:41,330 DEBUG [M:0;75744186b12a:42897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/de7bee67c7d5426ba6c195add1b409ad is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733939020062/Put/seqid=0
2024-12-11T17:43:41,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741845_1021 (size=6438)
2024-12-11T17:43:41,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741845_1021 (size=6438)
2024-12-11T17:43:41,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741845_1021 (size=6438)
2024-12-11T17:43:41,337 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-11T17:43:41,337 INFO [RS:2;75744186b12a:46677 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-11T17:43:41,337 DEBUG [pool-336-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46677-0x1001607750e0003, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-11T17:43:41,337 INFO [RS:2;75744186b12a:46677 {}] regionserver.HRegionServer(1031): Exiting; stopping=75744186b12a,46677,1733939017631; zookeeper connection closed.
2024-12-11T17:43:41,339 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@26dbe4a9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@26dbe4a9
2024-12-11T17:43:41,339 INFO [M:0;75744186b12a:42897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/de7bee67c7d5426ba6c195add1b409ad
2024-12-11T17:43:41,339 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-12-11T17:43:41,361 DEBUG [M:0;75744186b12a:42897 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b74e9d5004e14865b60bf34158cd0cd7 is 69, key is 75744186b12a,43475,1733939017586/rs:state/1733939018344/Put/seqid=0
2024-12-11T17:43:41,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741846_1022 (size=5294)
2024-12-11T17:43:41,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741846_1022 (size=5294)
2024-12-11T17:43:41,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741846_1022 (size=5294)
2024-12-11T17:43:41,369 INFO [M:0;75744186b12a:42897 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b74e9d5004e14865b60bf34158cd0cd7
2024-12-11T17:43:41,378 DEBUG [M:0;75744186b12a:42897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b6dc3b47b6bf459298c5203824f016dd as hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b6dc3b47b6bf459298c5203824f016dd
2024-12-11T17:43:41,387 INFO [M:0;75744186b12a:42897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b6dc3b47b6bf459298c5203824f016dd, entries=8, sequenceid=72, filesize=5.5 K
2024-12-11T17:43:41,389 DEBUG [M:0;75744186b12a:42897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/de7bee67c7d5426ba6c195add1b409ad as hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/de7bee67c7d5426ba6c195add1b409ad
2024-12-11T17:43:41,399 INFO [M:0;75744186b12a:42897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/de7bee67c7d5426ba6c195add1b409ad, entries=8, sequenceid=72, filesize=6.3 K
2024-12-11T17:43:41,402 DEBUG [M:0;75744186b12a:42897 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b74e9d5004e14865b60bf34158cd0cd7 as hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b74e9d5004e14865b60bf34158cd0cd7
2024-12-11T17:43:41,419 INFO [M:0;75744186b12a:42897 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40023/user/jenkins/test-data/90aece8c-33ba-a385-a105-b6dcc0ee3762/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b74e9d5004e14865b60bf34158cd0cd7, entries=3, sequenceid=72, filesize=5.2 K
2024-12-11T17:43:41,421 INFO [M:0;75744186b12a:42897 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=72, compaction requested=false
2024-12-11T17:43:41,423 INFO [M:0;75744186b12a:42897 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-12-11T17:43:41,423 DEBUG [M:0;75744186b12a:42897 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733939021268Disabling compacts and flushes for region at 1733939021268Disabling writes for close at 1733939021268Obtaining lock to block concurrent updates at 1733939021268Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733939021268Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733939021269 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733939021270 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733939021270Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733939021290 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733939021290Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733939021308 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733939021329 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733939021329Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733939021345 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733939021361 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733939021361Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@679397b0: reopening flushed file at 1733939021377 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b2522bb: reopening flushed file at 1733939021388 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@89814ff: reopening flushed file at 1733939021400 (+12 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 153ms, sequenceid=72, compaction requested=false at 1733939021421 (+21 ms)Writing region close event to WAL at 1733939021423 (+2 ms)Closed at 1733939021423
2024-12-11T17:43:41,423 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,423 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,424 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,424 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,424 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-12-11T17:43:41,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35363 is added to blk_1073741830_1006 (size=32662)
2024-12-11T17:43:41,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44279 is added to blk_1073741830_1006 (size=32662)
2024-12-11T17:43:41,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38503 is added to blk_1073741830_1006 (size=32662)
2024-12-11T17:43:41,428 INFO [M:0;75744186b12a:42897 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-12-11T17:43:41,428 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-12-11T17:43:41,428 INFO [M:0;75744186b12a:42897 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42897
2024-12-11T17:43:41,428 INFO [M:0;75744186b12a:42897 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-12-11T17:43:41,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-11T17:43:41,538 INFO [M:0;75744186b12a:42897 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-12-11T17:43:41,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42897-0x1001607750e0000, quorum=127.0.0.1:63672, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-11T17:43:41,544 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30ad1b96{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T17:43:41,545 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ec884b9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-11T17:43:41,545 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-11T17:43:41,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f79a540{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-11T17:43:41,546 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34635d45{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/hadoop.log.dir/,STOPPED}
2024-12-11T17:43:41,547 WARN [BP-1366890980-172.17.0.2-1733939014221 heartbeating to localhost/127.0.0.1:40023 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-11T17:43:41,547 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-11T17:43:41,547 WARN [BP-1366890980-172.17.0.2-1733939014221 heartbeating to localhost/127.0.0.1:40023 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1366890980-172.17.0.2-1733939014221 (Datanode Uuid ffd3cbc3-7d51-40a2-85e1-a9ab266b7467) service to localhost/127.0.0.1:40023
2024-12-11T17:43:41,547 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-11T17:43:41,549 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data5/current/BP-1366890980-172.17.0.2-1733939014221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T17:43:41,549 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data6/current/BP-1366890980-172.17.0.2-1733939014221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T17:43:41,549 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-11T17:43:41,576 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a6698d9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T17:43:41,579 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@627fdc4a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-11T17:43:41,579 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-11T17:43:41,580 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@433bb18e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-11T17:43:41,580 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e4808da{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/hadoop.log.dir/,STOPPED}
2024-12-11T17:43:41,582 WARN [BP-1366890980-172.17.0.2-1733939014221 heartbeating to localhost/127.0.0.1:40023 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-11T17:43:41,582 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-11T17:43:41,582 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-11T17:43:41,582 WARN [BP-1366890980-172.17.0.2-1733939014221 heartbeating to localhost/127.0.0.1:40023 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1366890980-172.17.0.2-1733939014221 (Datanode Uuid 886a14d1-e86f-40f9-95e2-b471c33d681e) service to localhost/127.0.0.1:40023
2024-12-11T17:43:41,583 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data4/current/BP-1366890980-172.17.0.2-1733939014221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T17:43:41,584 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data3/current/BP-1366890980-172.17.0.2-1733939014221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T17:43:41,584 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-11T17:43:41,596 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@165ba57{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-11T17:43:41,598 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1ed60080{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-11T17:43:41,598 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-11T17:43:41,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b7d65bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-11T17:43:41,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10b050c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/hadoop.log.dir/,STOPPED}
2024-12-11T17:43:41,608 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-11T17:43:41,608 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-11T17:43:41,608 WARN [BP-1366890980-172.17.0.2-1733939014221 heartbeating to localhost/127.0.0.1:40023 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-11T17:43:41,608 WARN [BP-1366890980-172.17.0.2-1733939014221 heartbeating to localhost/127.0.0.1:40023 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1366890980-172.17.0.2-1733939014221 (Datanode Uuid ae240149-aa00-4dfd-9236-4ebaecc5f02c) service to localhost/127.0.0.1:40023
2024-12-11T17:43:41,609 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data1/current/BP-1366890980-172.17.0.2-1733939014221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T17:43:41,610 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/cluster_c5e87b14-60ff-58f0-e9fd-973d976ee656/data/data2/current/BP-1366890980-172.17.0.2-1733939014221 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-11T17:43:41,610 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-11T17:43:41,618 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4ef2d06{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-11T17:43:41,619 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ce28da4{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-11T17:43:41,619 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-11T17:43:41,619 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d625937{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-11T17:43:41,620 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bb5145b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/d044159c-5b7f-281b-fab5-e3790b70b4a2/hadoop.log.dir/,STOPPED}
2024-12-11T17:43:41,633 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-12-11T17:43:41,667 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-12-11T17:43:41,676 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=147 (was 86) - Thread LEAK? -, OpenFileDescriptor=518 (was 445) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=702 (was 608) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3355 (was 4122)