2024-11-08 03:50:00,177 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-08 03:50:00,189 main DEBUG Took 0.010285 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-08 03:50:00,189 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-08 03:50:00,190 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-08 03:50:00,191 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-08 03:50:00,192 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,208 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-08 03:50:00,219 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,220 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,220 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,221 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,221 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,221 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,222 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,222 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,223 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,223 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,224 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,224 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,225 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,225 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-08 03:50:00,226 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,226 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,226 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,227 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,227 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,227 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,228 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,228 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,229 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,229 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-08 03:50:00,229 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,229 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-08 03:50:00,231 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-08 03:50:00,232 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-08 03:50:00,234 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-08 03:50:00,234 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-08 03:50:00,235 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-08 03:50:00,236 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-08 03:50:00,243 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-08 03:50:00,246 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-08 03:50:00,247 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-08 03:50:00,248 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-08 03:50:00,248 main DEBUG createAppenders(={Console}) 2024-11-08 03:50:00,249 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-11-08 03:50:00,249 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-11-08 03:50:00,249 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-11-08 03:50:00,250 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-08 03:50:00,250 main DEBUG OutputStream closed 2024-11-08 03:50:00,250 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-08 03:50:00,250 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-08 03:50:00,251 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-11-08 03:50:00,319 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-08 03:50:00,321 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-08 03:50:00,323 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-08 03:50:00,324 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-08 03:50:00,325 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-08 03:50:00,325 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-08 03:50:00,326 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-08 03:50:00,326 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-08 03:50:00,327 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-08 03:50:00,327 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-08 03:50:00,328 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-08 03:50:00,328 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-08 03:50:00,328 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-08 03:50:00,329 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-08 03:50:00,329 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-08 03:50:00,329 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-08 03:50:00,330 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-08 03:50:00,331 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-08 03:50:00,333 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-08 03:50:00,333 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-11-08 03:50:00,333 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-08 03:50:00,334 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-11-08T03:50:00,351 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-08 03:50:00,353 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-08 03:50:00,354 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-08T03:50:00,604 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814 2024-11-08T03:50:00,629 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c, deleteOnExit=true 2024-11-08T03:50:00,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/test.cache.data in system properties and HBase conf 2024-11-08T03:50:00,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.tmp.dir in system properties and HBase conf 2024-11-08T03:50:00,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.log.dir in system properties and HBase conf 2024-11-08T03:50:00,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-08T03:50:00,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-08T03:50:00,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-08T03:50:00,718 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-08T03:50:00,802 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-08T03:50:00,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-08T03:50:00,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-08T03:50:00,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-08T03:50:00,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T03:50:00,808 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-08T03:50:00,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-08T03:50:00,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T03:50:00,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T03:50:00,810 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T03:50:00,811 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/nfs.dump.dir in system properties and HBase conf 2024-11-08T03:50:00,811 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/java.io.tmpdir in system properties and HBase conf 2024-11-08T03:50:00,812 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T03:50:00,812 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T03:50:00,812 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T03:50:01,915 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-08T03:50:01,987 INFO [Time-limited test {}] log.Log(170): Logging initialized @2441ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-08T03:50:02,058 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T03:50:02,123 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T03:50:02,144 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T03:50:02,144 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T03:50:02,145 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T03:50:02,157 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T03:50:02,160 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b03c34d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.log.dir/,AVAILABLE} 2024-11-08T03:50:02,161 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3cbd6fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T03:50:02,349 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cb83937{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/java.io.tmpdir/jetty-localhost-38487-hadoop-hdfs-3_4_1-tests_jar-_-any-6412116520238653830/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T03:50:02,356 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69b5b273{HTTP/1.1, (http/1.1)}{localhost:38487} 2024-11-08T03:50:02,357 INFO [Time-limited test {}] server.Server(415): Started @2811ms 2024-11-08T03:50:02,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T03:50:02,949 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T03:50:02,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T03:50:02,950 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T03:50:02,950 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T03:50:02,951 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@179ed6d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.log.dir/,AVAILABLE} 2024-11-08T03:50:02,952 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4f02cc61{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T03:50:03,054 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3990ff75{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/java.io.tmpdir/jetty-localhost-35979-hadoop-hdfs-3_4_1-tests_jar-_-any-16782366804632810460/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:03,055 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@48731e1b{HTTP/1.1, (http/1.1)}{localhost:35979} 2024-11-08T03:50:03,055 INFO [Time-limited test {}] server.Server(415): Started @3509ms 2024-11-08T03:50:03,103 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T03:50:03,201 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T03:50:03,209 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T03:50:03,212 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T03:50:03,213 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T03:50:03,213 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T03:50:03,214 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@50510811{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.log.dir/,AVAILABLE} 2024-11-08T03:50:03,215 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@125705fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T03:50:03,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@133f1bad{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/java.io.tmpdir/jetty-localhost-39073-hadoop-hdfs-3_4_1-tests_jar-_-any-16978383314988703502/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:03,348 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@39860596{HTTP/1.1, (http/1.1)}{localhost:39073} 2024-11-08T03:50:03,349 INFO [Time-limited test {}] server.Server(415): Started @3803ms 2024-11-08T03:50:03,351 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T03:50:03,385 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T03:50:03,389 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T03:50:03,390 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T03:50:03,391 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T03:50:03,391 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T03:50:03,391 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28d0ee11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.log.dir/,AVAILABLE} 2024-11-08T03:50:03,392 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@14c1b227{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T03:50:03,491 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@17f8e572{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/java.io.tmpdir/jetty-localhost-37097-hadoop-hdfs-3_4_1-tests_jar-_-any-8845891118363042166/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:03,491 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11aaae40{HTTP/1.1, (http/1.1)}{localhost:37097} 2024-11-08T03:50:03,492 INFO [Time-limited test {}] server.Server(415): Started @3946ms 2024-11-08T03:50:03,494 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-08T03:50:05,009 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data3/current/BP-1412343470-172.17.0.3-1731037801377/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:05,009 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data4/current/BP-1412343470-172.17.0.3-1731037801377/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:05,009 WARN [Thread-128 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data1/current/BP-1412343470-172.17.0.3-1731037801377/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:05,009 WARN [Thread-127 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data6/current/BP-1412343470-172.17.0.3-1731037801377/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:05,009 WARN [Thread-129 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data2/current/BP-1412343470-172.17.0.3-1731037801377/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:05,009 WARN [Thread-126 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data5/current/BP-1412343470-172.17.0.3-1731037801377/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:05,048 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T03:50:05,048 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-08T03:50:05,048 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T03:50:05,095 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x516a32637dd6211a with lease ID 0x36614d5f9450aa84: Processing first storage report for DS-a5df7eb9-f4d2-4424-9980-dc0b0559d8f9 from datanode DatanodeRegistration(127.0.0.1:42839, datanodeUuid=40bf6719-82e2-491f-b32c-24904674396c, infoPort=37607, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377) 2024-11-08T03:50:05,096 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x516a32637dd6211a with lease ID 0x36614d5f9450aa84: from storage DS-a5df7eb9-f4d2-4424-9980-dc0b0559d8f9 node DatanodeRegistration(127.0.0.1:42839, datanodeUuid=40bf6719-82e2-491f-b32c-24904674396c, infoPort=37607, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-08T03:50:05,096 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7baf8d84cd0e3460 with lease ID 0x36614d5f9450aa85: Processing first storage report for DS-6a0d724f-868c-4cfd-899e-76beed9138ed from datanode DatanodeRegistration(127.0.0.1:43295, datanodeUuid=10059244-cf2b-4b9b-8b92-f22ec880b825, infoPort=35489, infoSecurePort=0, ipcPort=46329, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377) 2024-11-08T03:50:05,096 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7baf8d84cd0e3460 with lease ID 0x36614d5f9450aa85: from storage DS-6a0d724f-868c-4cfd-899e-76beed9138ed node DatanodeRegistration(127.0.0.1:43295, datanodeUuid=10059244-cf2b-4b9b-8b92-f22ec880b825, infoPort=35489, infoSecurePort=0, ipcPort=46329, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T03:50:05,097 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7db39fd0385af5eb with lease ID 0x36614d5f9450aa83: Processing first storage report for DS-c8f168e1-0407-45f4-af3d-da4b6037178e from datanode DatanodeRegistration(127.0.0.1:33069, datanodeUuid=77707996-e807-4342-a960-0e4b5a7c8c2b, infoPort=33831, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377) 2024-11-08T03:50:05,097 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7db39fd0385af5eb with lease ID 0x36614d5f9450aa83: from storage DS-c8f168e1-0407-45f4-af3d-da4b6037178e node DatanodeRegistration(127.0.0.1:33069, datanodeUuid=77707996-e807-4342-a960-0e4b5a7c8c2b, infoPort=33831, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T03:50:05,097 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x516a32637dd6211a with lease ID 0x36614d5f9450aa84: Processing first storage report for DS-89b6e834-216b-4f9f-9588-b5813cd584d2 from datanode DatanodeRegistration(127.0.0.1:42839, datanodeUuid=40bf6719-82e2-491f-b32c-24904674396c, infoPort=37607, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377) 2024-11-08T03:50:05,097 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x516a32637dd6211a with lease ID 0x36614d5f9450aa84: from storage DS-89b6e834-216b-4f9f-9588-b5813cd584d2 node DatanodeRegistration(127.0.0.1:42839, datanodeUuid=40bf6719-82e2-491f-b32c-24904674396c, infoPort=37607, infoSecurePort=0, ipcPort=37835, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T03:50:05,097 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7baf8d84cd0e3460 with lease ID 0x36614d5f9450aa85: Processing first storage report for DS-06af2c04-2ba1-4848-bc49-a53843c8a3de from datanode DatanodeRegistration(127.0.0.1:43295, datanodeUuid=10059244-cf2b-4b9b-8b92-f22ec880b825, infoPort=35489, infoSecurePort=0, ipcPort=46329, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377) 2024-11-08T03:50:05,098 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7baf8d84cd0e3460 with lease ID 0x36614d5f9450aa85: from storage DS-06af2c04-2ba1-4848-bc49-a53843c8a3de node DatanodeRegistration(127.0.0.1:43295, datanodeUuid=10059244-cf2b-4b9b-8b92-f22ec880b825, infoPort=35489, infoSecurePort=0, ipcPort=46329, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T03:50:05,098 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7db39fd0385af5eb with lease ID 0x36614d5f9450aa83: Processing first storage report for DS-e33e895e-802e-46d3-bc1f-e23f49beb2db from datanode DatanodeRegistration(127.0.0.1:33069, datanodeUuid=77707996-e807-4342-a960-0e4b5a7c8c2b, infoPort=33831, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377) 2024-11-08T03:50:05,098 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7db39fd0385af5eb with lease ID 0x36614d5f9450aa83: from storage DS-e33e895e-802e-46d3-bc1f-e23f49beb2db node DatanodeRegistration(127.0.0.1:33069, datanodeUuid=77707996-e807-4342-a960-0e4b5a7c8c2b, infoPort=33831, infoSecurePort=0, ipcPort=42299, storageInfo=lv=-57;cid=testClusterID;nsid=1837065031;c=1731037801377), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T03:50:05,104 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814 2024-11-08T03:50:05,168 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-11-08T03:50:05,229 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=161, OpenFileDescriptor=393, MaxFileDescriptor=1048576, SystemLoadAverage=62, ProcessCount=11, AvailableMemoryMB=13091 2024-11-08T03:50:05,231 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-08T03:50:05,239 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS 2024-11-08T03:50:05,320 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/zookeeper_0, clientPort=56972, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T03:50:05,332 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56972 2024-11-08T03:50:05,354 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:05,357 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:05,449 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:05,450 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:05,506 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:41376 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:33069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41376 dst: /127.0.0.1:33069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:05,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-08T03:50:05,928 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T03:50:05,941 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407 with version=8 2024-11-08T03:50:05,942 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/hbase-staging 2024-11-08T03:50:06,026 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-08T03:50:06,273 INFO [Time-limited test {}] client.ConnectionUtils(128): master/350fccc412b5:0 server-side Connection retries=45 2024-11-08T03:50:06,281 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:06,282 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:06,286 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T03:50:06,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:06,286 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T03:50:06,408 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T03:50:06,458 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-08T03:50:06,465 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-08T03:50:06,468 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T03:50:06,489 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 91797 (auto-detected) 2024-11-08T03:50:06,490 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-08T03:50:06,505 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:45513 2024-11-08T03:50:06,523 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45513 connecting to ZooKeeper ensemble=127.0.0.1:56972 2024-11-08T03:50:06,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:455130x0, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T03:50:06,659 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45513-0x1011890b6a10000 connected 2024-11-08T03:50:06,750 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:06,753 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:06,761 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:06,764 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407, hbase.cluster.distributed=false 2024-11-08T03:50:06,787 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T03:50:06,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45513 2024-11-08T03:50:06,792 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45513 2024-11-08T03:50:06,793 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45513 2024-11-08T03:50:06,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45513 2024-11-08T03:50:06,794 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45513 2024-11-08T03:50:06,889 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/350fccc412b5:0 server-side Connection retries=45 2024-11-08T03:50:06,890 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:06,890 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:06,890 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T03:50:06,891 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:06,891 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T03:50:06,893 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T03:50:06,897 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T03:50:06,898 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37985 2024-11-08T03:50:06,901 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37985 connecting to ZooKeeper ensemble=127.0.0.1:56972 2024-11-08T03:50:06,902 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:06,909 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:06,926 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:379850x0, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T03:50:06,926 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:379850x0, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:06,927 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37985-0x1011890b6a10001 connected 2024-11-08T03:50:06,930 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T03:50:06,937 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T03:50:06,940 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T03:50:06,945 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T03:50:06,948 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37985 2024-11-08T03:50:06,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37985 
2024-11-08T03:50:06,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37985 2024-11-08T03:50:06,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37985 2024-11-08T03:50:06,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37985 2024-11-08T03:50:06,965 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/350fccc412b5:0 server-side Connection retries=45 2024-11-08T03:50:06,965 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:06,966 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:06,966 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T03:50:06,966 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:06,966 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T03:50:06,966 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T03:50:06,967 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T03:50:06,968 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:36433 2024-11-08T03:50:06,969 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36433 connecting to ZooKeeper ensemble=127.0.0.1:56972 2024-11-08T03:50:06,970 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:06,973 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:06,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364330x0, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T03:50:06,989 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:364330x0, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:06,989 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36433-0x1011890b6a10002 connected 2024-11-08T03:50:06,990 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 
2024-11-08T03:50:06,991 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T03:50:06,992 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T03:50:06,994 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T03:50:06,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36433 2024-11-08T03:50:06,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36433 2024-11-08T03:50:06,997 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36433 2024-11-08T03:50:06,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36433 2024-11-08T03:50:07,001 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36433 2024-11-08T03:50:07,016 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/350fccc412b5:0 server-side Connection retries=45 2024-11-08T03:50:07,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:07,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:07,017 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T03:50:07,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:07,017 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T03:50:07,017 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T03:50:07,018 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T03:50:07,019 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42565 2024-11-08T03:50:07,021 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42565 connecting to ZooKeeper ensemble=127.0.0.1:56972 2024-11-08T03:50:07,023 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:07,025 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:07,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425650x0, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T03:50:07,042 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:425650x0, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:07,042 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42565-0x1011890b6a10003 connected 2024-11-08T03:50:07,043 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T03:50:07,044 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T03:50:07,045 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T03:50:07,047 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T03:50:07,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42565 2024-11-08T03:50:07,049 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42565 2024-11-08T03:50:07,050 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42565 2024-11-08T03:50:07,054 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42565 2024-11-08T03:50:07,054 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42565 2024-11-08T03:50:07,068 DEBUG [M:0;350fccc412b5:45513 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;350fccc412b5:45513 2024-11-08T03:50:07,069 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/350fccc412b5,45513,1731037806126 2024-11-08T03:50:07,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:07,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:07,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-11-08T03:50:07,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:07,086 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/350fccc412b5,45513,1731037806126 2024-11-08T03:50:07,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T03:50:07,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T03:50:07,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T03:50:07,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,115 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,117 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T03:50:07,118 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/350fccc412b5,45513,1731037806126 from backup master directory 2024-11-08T03:50:07,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/350fccc412b5,45513,1731037806126 2024-11-08T03:50:07,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:07,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-08T03:50:07,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:07,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:07,127 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T03:50:07,127 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=350fccc412b5,45513,1731037806126 2024-11-08T03:50:07,129 INFO [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-08T03:50:07,130 INFO [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-08T03:50:07,188 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/hbase.id] with ID: 30486984-cf3e-4b43-9c05-189e37fbcd03 2024-11-08T03:50:07,189 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/.tmp/hbase.id 2024-11-08T03:50:07,196 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:07,196 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:07,200 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:58088 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:42839:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58088 dst: /127.0.0.1:42839 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:07,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-08T03:50:07,206 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T03:50:07,206 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/.tmp/hbase.id]:[hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/hbase.id] 2024-11-08T03:50:07,247 INFO [master/350fccc412b5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:07,252 INFO [master/350fccc412b5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T03:50:07,270 INFO [master/350fccc412b5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-08T03:50:07,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,295 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:07,295 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). 
Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:07,298 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:41402 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:33069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41402 dst: /127.0.0.1:33069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:07,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-08T03:50:07,304 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T03:50:07,317 INFO [master/350fccc412b5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T03:50:07,318 INFO [master/350fccc412b5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T03:50:07,323 INFO [master/350fccc412b5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T03:50:07,348 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:07,349 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:07,353 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:41422 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:33069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41422 dst: /127.0.0.1:33069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:07,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-08T03:50:07,359 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T03:50:07,376 INFO [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store 2024-11-08T03:50:07,393 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:07,393 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:07,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:48970 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43295:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48970 dst: /127.0.0.1:43295 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:07,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-08T03:50:07,402 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T03:50:07,406 INFO [master/350fccc412b5:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-08T03:50:07,409 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:07,410 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T03:50:07,410 INFO [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:07,410 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:07,412 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-08T03:50:07,412 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:07,412 INFO [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:07,413 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731037807410Disabling compacts and flushes for region at 1731037807410Disabling writes for close at 1731037807412 (+2 ms)Writing region close event to WAL at 1731037807412Closed at 1731037807412 2024-11-08T03:50:07,416 WARN [master/350fccc412b5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/.initializing 2024-11-08T03:50:07,416 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/WALs/350fccc412b5,45513,1731037806126 2024-11-08T03:50:07,426 INFO [master/350fccc412b5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T03:50:07,441 INFO [master/350fccc412b5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=350fccc412b5%2C45513%2C1731037806126, suffix=, logDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/WALs/350fccc412b5,45513,1731037806126, archiveDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/oldWALs, maxLogs=10 2024-11-08T03:50:07,476 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/WALs/350fccc412b5,45513,1731037806126/350fccc412b5%2C45513%2C1731037806126.1731037807446, exclude list is [], retry=0 2024-11-08T03:50:07,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:414) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:473) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:468) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:07,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43295,DS-6a0d724f-868c-4cfd-899e-76beed9138ed,DISK] 2024-11-08T03:50:07,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33069,DS-c8f168e1-0407-45f4-af3d-da4b6037178e,DISK] 2024-11-08T03:50:07,497 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42839,DS-a5df7eb9-f4d2-4424-9980-dc0b0559d8f9,DISK] 2024-11-08T03:50:07,500 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-08T03:50:07,540 INFO [master/350fccc412b5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/WALs/350fccc412b5,45513,1731037806126/350fccc412b5%2C45513%2C1731037806126.1731037807446 2024-11-08T03:50:07,540 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37607:37607),(127.0.0.1/127.0.0.1:33831:33831),(127.0.0.1/127.0.0.1:35489:35489)] 2024-11-08T03:50:07,541 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T03:50:07,541 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:07,544 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,545 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,580 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T03:50:07,605 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:07,607 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:07,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T03:50:07,611 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:07,612 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T03:50:07,612 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,615 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T03:50:07,615 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:07,616 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T03:50:07,616 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T03:50:07,618 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:07,619 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T03:50:07,620 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,623 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,624 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,629 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,630 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,633 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T03:50:07,637 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:07,644 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T03:50:07,645 INFO [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59699699, jitterRate=-0.11040516197681427}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T03:50:07,653 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731037807556Initializing all the Stores at 1731037807559 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037807559Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037807559Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037807560 (+1 ms)Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037807560Cleaning up temporary data from old regions at 1731037807630 (+70 ms)Region opened successfully at 1731037807653 (+23 ms) 2024-11-08T03:50:07,654 INFO [master/350fccc412b5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T03:50:07,686 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bd8cdf4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=350fccc412b5/172.17.0.3:0 2024-11-08T03:50:07,712 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T03:50:07,721 INFO [master/350fccc412b5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T03:50:07,721 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T03:50:07,723 INFO [master/350fccc412b5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T03:50:07,724 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-08T03:50:07,729 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-08T03:50:07,730 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T03:50:07,758 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-08T03:50:07,767 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T03:50:07,820 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T03:50:07,823 INFO [master/350fccc412b5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T03:50:07,825 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T03:50:07,830 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T03:50:07,833 INFO [master/350fccc412b5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T03:50:07,837 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T03:50:07,849 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T03:50:07,851 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T03:50:07,862 DEBUG [master/350fccc412b5:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T03:50:07,882 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T03:50:07,893 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T03:50:07,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:07,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:07,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:07,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:07,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,909 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=350fccc412b5,45513,1731037806126, sessionid=0x1011890b6a10000, setting cluster-up flag (Was=false) 2024-11-08T03:50:07,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-08T03:50:07,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,968 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T03:50:07,973 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=350fccc412b5,45513,1731037806126 2024-11-08T03:50:07,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:07,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:08,030 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T03:50:08,033 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=350fccc412b5,45513,1731037806126 2024-11-08T03:50:08,043 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T03:50:08,059 INFO [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(746): ClusterId : 30486984-cf3e-4b43-9c05-189e37fbcd03 2024-11-08T03:50:08,059 INFO [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(746): ClusterId : 30486984-cf3e-4b43-9c05-189e37fbcd03 2024-11-08T03:50:08,059 INFO [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer(746): ClusterId : 30486984-cf3e-4b43-9c05-189e37fbcd03 2024-11-08T03:50:08,061 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T03:50:08,061 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T03:50:08,061 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T03:50:08,085 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T03:50:08,085 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-11-08T03:50:08,085 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T03:50:08,085 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T03:50:08,085 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T03:50:08,085 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T03:50:08,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-08T03:50:08,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-08T03:50:08,105 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T03:50:08,105 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T03:50:08,105 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T03:50:08,106 DEBUG [RS:1;350fccc412b5:36433 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fdbd1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=350fccc412b5/172.17.0.3:0 2024-11-08T03:50:08,106 DEBUG [RS:2;350fccc412b5:42565 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@671fd70c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=350fccc412b5/172.17.0.3:0 2024-11-08T03:50:08,106 DEBUG [RS:0;350fccc412b5:37985 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@386137eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=350fccc412b5/172.17.0.3:0 2024-11-08T03:50:08,121 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T03:50:08,124 DEBUG [RS:1;350fccc412b5:36433 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;350fccc412b5:36433 2024-11-08T03:50:08,128 INFO [RS:1;350fccc412b5:36433 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T03:50:08,128 INFO [RS:1;350fccc412b5:36433 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T03:50:08,128 DEBUG [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-08T03:50:08,128 DEBUG [RS:2;350fccc412b5:42565 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;350fccc412b5:42565 2024-11-08T03:50:08,128 DEBUG [RS:0;350fccc412b5:37985 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;350fccc412b5:37985 2024-11-08T03:50:08,128 INFO [RS:2;350fccc412b5:42565 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T03:50:08,128 INFO [RS:0;350fccc412b5:37985 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T03:50:08,128 INFO [RS:2;350fccc412b5:42565 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T03:50:08,128 INFO [RS:0;350fccc412b5:37985 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T03:50:08,129 DEBUG [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T03:50:08,129 DEBUG [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T03:50:08,131 INFO [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(2659): reportForDuty to master=350fccc412b5,45513,1731037806126 with port=36433, startcode=1731037806965 2024-11-08T03:50:08,131 INFO [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer(2659): reportForDuty to master=350fccc412b5,45513,1731037806126 with port=42565, startcode=1731037807016 2024-11-08T03:50:08,131 INFO [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(2659): reportForDuty to master=350fccc412b5,45513,1731037806126 with port=37985, startcode=1731037806857 2024-11-08T03:50:08,132 INFO [master/350fccc412b5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T03:50:08,139 INFO [master/350fccc412b5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-08T03:50:08,144 DEBUG [RS:0;350fccc412b5:37985 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T03:50:08,144 DEBUG [RS:2;350fccc412b5:42565 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T03:50:08,144 DEBUG [RS:1;350fccc412b5:36433 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T03:50:08,145 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 350fccc412b5,45513,1731037806126 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T03:50:08,152 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/350fccc412b5:0, corePoolSize=5, maxPoolSize=5 2024-11-08T03:50:08,152 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/350fccc412b5:0, corePoolSize=5, maxPoolSize=5 2024-11-08T03:50:08,152 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/350fccc412b5:0, corePoolSize=5, maxPoolSize=5 2024-11-08T03:50:08,153 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/350fccc412b5:0, corePoolSize=5, maxPoolSize=5 2024-11-08T03:50:08,153 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/350fccc412b5:0, corePoolSize=10, maxPoolSize=10 2024-11-08T03:50:08,153 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,153 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/350fccc412b5:0, corePoolSize=2, maxPoolSize=2 2024-11-08T03:50:08,153 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,155 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731037838154 2024-11-08T03:50:08,156 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T03:50:08,158 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T03:50:08,162 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T03:50:08,162 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T03:50:08,162 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T03:50:08,163 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T03:50:08,165 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,168 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T03:50:08,169 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T03:50:08,170 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T03:50:08,171 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T03:50:08,172 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T03:50:08,174 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T03:50:08,175 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T03:50:08,175 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.large.0-1731037808175,5,FailOnTimeoutGroup] 2024-11-08T03:50:08,176 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.small.0-1731037808176,5,FailOnTimeoutGroup] 2024-11-08T03:50:08,176 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,176 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T03:50:08,177 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,178 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
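The HMaster line above notes that automatic reopening of regions with a very high store file reference count stays off until hbase.regions.recovery.store.file.ref.count is set to a value greater than zero. A minimal sketch, assuming an embedded or test setup where the configuration is built programmatically; the threshold of 256 is an illustrative value, not one taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class EnableRefCountRecovery {
        public static void main(String[] args) {
            // Start from the usual HBase configuration (hbase-site.xml on the classpath).
            Configuration conf = HBaseConfiguration.create();
            // Illustrative threshold; per the log message, any value > 0 enables the feature.
            conf.setInt("hbase.regions.recovery.store.file.ref.count", 256);
            System.out.println("threshold = "
                + conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
        }
    }

Production clusters would normally carry the same property in hbase-site.xml rather than setting it in code.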
2024-11-08T03:50:08,182 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:08,182 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T03:50:08,186 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:52131, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T03:50:08,186 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60549, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T03:50:08,186 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:35023, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T03:50:08,190 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:08,190 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-08T03:50:08,194 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45513 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 350fccc412b5,36433,1731037806965 2024-11-08T03:50:08,196 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45513 {}] master.ServerManager(517): Registering regionserver=350fccc412b5,36433,1731037806965 2024-11-08T03:50:08,201 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:58140 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:42839:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58140 dst: /127.0.0.1:42839 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:08,208 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45513 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 350fccc412b5,37985,1731037806857 2024-11-08T03:50:08,208 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45513 {}] master.ServerManager(517): Registering regionserver=350fccc412b5,37985,1731037806857 2024-11-08T03:50:08,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_-9223372036854775712_1013 (size=1321) 2024-11-08T03:50:08,211 DEBUG [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407 2024-11-08T03:50:08,211 DEBUG [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41665 2024-11-08T03:50:08,212 DEBUG [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T03:50:08,212 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
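At this point the master has checked decommissioned status for and registered the first region servers. A minimal sketch, assuming the HBase 2/3 client API, for listing the live region servers a running master currently knows about; the ZooKeeper quorum below is a placeholder, not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListLiveRegionServers {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            conf.set("hbase.zookeeper.quorum", "127.0.0.1"); // placeholder quorum
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                ClusterMetrics metrics = admin.getClusterMetrics();
                for (ServerName sn : metrics.getLiveServerMetrics().keySet()) {
                    // Printed as host,port,startcode -- the same form the master logs above.
                    System.out.println(sn.getServerName());
                }
            }
        }
    }
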
2024-11-08T03:50:08,213 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45513 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 350fccc412b5,42565,1731037807016 2024-11-08T03:50:08,214 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T03:50:08,214 DEBUG [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407 2024-11-08T03:50:08,214 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45513 {}] master.ServerManager(517): Registering regionserver=350fccc412b5,42565,1731037807016 2024-11-08T03:50:08,214 DEBUG [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41665 2024-11-08T03:50:08,214 DEBUG [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T03:50:08,214 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407 2024-11-08T03:50:08,218 DEBUG [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407 2024-11-08T03:50:08,218 DEBUG [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41665 2024-11-08T03:50:08,218 DEBUG [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T03:50:08,222 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:08,222 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:08,229 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:41458 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:33069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41458 dst: /127.0.0.1:33069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:08,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-08T03:50:08,236 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
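The repeated DFSStripedOutputStream warnings above mean the writer could not place the parity blocks required by the RS-3-2-1024k erasure-coding policy, which needs five datanodes (three data plus two parity); a mini-cluster with fewer datanodes will keep hitting them, and 'hdfs ec -verifyClusterSetup' (as the message itself suggests) reports the same mismatch. A minimal sketch, assuming the Hadoop 3 HDFS client API, for checking which erasure-coding policy applies to a given path; the namenode address and path are placeholders, not values from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class CheckErasureCodingPolicy {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://localhost:8020"); // placeholder namenode
            Path dir = new Path("/user/jenkins/test-data");    // placeholder path
            try (DistributedFileSystem dfs = (DistributedFileSystem) dir.getFileSystem(conf)) {
                ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
                System.out.println(policy == null
                    ? "plain replication (no EC policy on this path)"
                    : "EC policy: " + policy.getName()); // e.g. RS-3-2-1024k
            }
        }
    }
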
2024-11-08T03:50:08,237 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:08,239 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T03:50:08,242 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T03:50:08,243 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:08,244 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:08,244 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T03:50:08,247 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T03:50:08,248 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:08,249 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:08,249 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T03:50:08,253 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T03:50:08,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T03:50:08,253 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:08,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:08,255 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T03:50:08,258 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T03:50:08,259 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:08,260 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:08,260 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T03:50:08,262 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740 2024-11-08T03:50:08,263 DEBUG [PEWorker-1 {}] 
regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740 2024-11-08T03:50:08,266 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T03:50:08,267 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T03:50:08,268 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T03:50:08,271 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T03:50:08,273 DEBUG [RS:1;350fccc412b5:36433 {}] zookeeper.ZKUtil(111): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/350fccc412b5,36433,1731037806965 2024-11-08T03:50:08,273 DEBUG [RS:0;350fccc412b5:37985 {}] zookeeper.ZKUtil(111): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/350fccc412b5,37985,1731037806857 2024-11-08T03:50:08,273 WARN [RS:1;350fccc412b5:36433 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T03:50:08,273 WARN [RS:0;350fccc412b5:37985 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T03:50:08,273 INFO [RS:1;350fccc412b5:36433 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T03:50:08,273 INFO [RS:0;350fccc412b5:37985 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T03:50:08,273 DEBUG [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,36433,1731037806965 2024-11-08T03:50:08,273 DEBUG [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,37985,1731037806857 2024-11-08T03:50:08,274 DEBUG [RS:2;350fccc412b5:42565 {}] zookeeper.ZKUtil(111): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/350fccc412b5,42565,1731037807016 2024-11-08T03:50:08,275 WARN [RS:2;350fccc412b5:42565 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T03:50:08,275 INFO [RS:2;350fccc412b5:42565 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T03:50:08,275 DEBUG [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,42565,1731037807016 2024-11-08T03:50:08,275 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [350fccc412b5,36433,1731037806965] 2024-11-08T03:50:08,275 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [350fccc412b5,37985,1731037806857] 2024-11-08T03:50:08,275 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [350fccc412b5,42565,1731037807016] 2024-11-08T03:50:08,284 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T03:50:08,285 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73877744, jitterRate=0.10086417198181152}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T03:50:08,290 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731037808237Initializing all the Stores at 1731037808239 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037808239Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037808239Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037808239Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037808239Cleaning up temporary data from old regions at 1731037808267 (+28 ms)Region opened successfully at 1731037808290 (+23 ms) 2024-11-08T03:50:08,291 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T03:50:08,291 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region 
hbase:meta,,1.1588230740 2024-11-08T03:50:08,291 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T03:50:08,291 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T03:50:08,291 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T03:50:08,293 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T03:50:08,293 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731037808291Disabling compacts and flushes for region at 1731037808291Disabling writes for close at 1731037808291Writing region close event to WAL at 1731037808292 (+1 ms)Closed at 1731037808293 (+1 ms) 2024-11-08T03:50:08,296 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T03:50:08,296 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T03:50:08,303 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T03:50:08,304 INFO [RS:0;350fccc412b5:37985 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T03:50:08,304 INFO [RS:2;350fccc412b5:42565 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T03:50:08,304 INFO [RS:1;350fccc412b5:36433 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T03:50:08,313 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T03:50:08,317 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T03:50:08,323 INFO [RS:0;350fccc412b5:37985 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T03:50:08,324 INFO [RS:2;350fccc412b5:42565 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T03:50:08,328 INFO [RS:1;350fccc412b5:36433 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T03:50:08,332 INFO [RS:0;350fccc412b5:37985 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T03:50:08,332 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-08T03:50:08,332 INFO [RS:1;350fccc412b5:36433 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T03:50:08,332 INFO [RS:2;350fccc412b5:42565 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T03:50:08,332 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,332 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,333 INFO [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T03:50:08,333 INFO [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T03:50:08,333 INFO [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T03:50:08,340 INFO [RS:0;350fccc412b5:37985 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T03:50:08,340 INFO [RS:1;350fccc412b5:36433 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T03:50:08,340 INFO [RS:2;350fccc412b5:42565 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T03:50:08,341 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,341 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,341 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-08T03:50:08,342 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,342 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/350fccc412b5:0, corePoolSize=2, maxPoolSize=2 2024-11-08T03:50:08,342 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/350fccc412b5:0, corePoolSize=2, maxPoolSize=2 2024-11-08T03:50:08,342 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/350fccc412b5:0, corePoolSize=2, maxPoolSize=2 2024-11-08T03:50:08,343 DEBUG 
[RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting 
executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:08,343 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:08,343 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:08,343 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:08,343 DEBUG [RS:2;350fccc412b5:42565 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:08,344 DEBUG [RS:1;350fccc412b5:36433 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:08,344 DEBUG [RS:0;350fccc412b5:37985 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:08,345 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,345 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,345 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,345 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,345 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,345 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,36433,1731037806965-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T03:50:08,347 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,347 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,347 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,347 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,347 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,347 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-08T03:50:08,347 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,347 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,347 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,347 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,42565,1731037807016-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T03:50:08,347 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,347 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,37985,1731037806857-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T03:50:08,364 INFO [RS:0;350fccc412b5:37985 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T03:50:08,364 INFO [RS:2;350fccc412b5:42565 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T03:50:08,366 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,37985,1731037806857-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,366 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,42565,1731037807016-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,366 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,366 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,367 INFO [RS:0;350fccc412b5:37985 {}] regionserver.Replication(171): 350fccc412b5,37985,1731037806857 started 2024-11-08T03:50:08,367 INFO [RS:2;350fccc412b5:42565 {}] regionserver.Replication(171): 350fccc412b5,42565,1731037807016 started 2024-11-08T03:50:08,369 INFO [RS:1;350fccc412b5:36433 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T03:50:08,369 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,36433,1731037806965-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,369 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,370 INFO [RS:1;350fccc412b5:36433 {}] regionserver.Replication(171): 350fccc412b5,36433,1731037806965 started 2024-11-08T03:50:08,383 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,384 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T03:50:08,384 INFO [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(1482): Serving as 350fccc412b5,36433,1731037806965, RpcServer on 350fccc412b5/172.17.0.3:36433, sessionid=0x1011890b6a10002 2024-11-08T03:50:08,384 INFO [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(1482): Serving as 350fccc412b5,37985,1731037806857, RpcServer on 350fccc412b5/172.17.0.3:37985, sessionid=0x1011890b6a10001 2024-11-08T03:50:08,384 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T03:50:08,384 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T03:50:08,385 DEBUG [RS:1;350fccc412b5:36433 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 350fccc412b5,36433,1731037806965 2024-11-08T03:50:08,385 DEBUG [RS:0;350fccc412b5:37985 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 350fccc412b5,37985,1731037806857 2024-11-08T03:50:08,385 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,36433,1731037806965' 2024-11-08T03:50:08,385 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,37985,1731037806857' 2024-11-08T03:50:08,385 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T03:50:08,385 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T03:50:08,386 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T03:50:08,386 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T03:50:08,386 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T03:50:08,386 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T03:50:08,386 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T03:50:08,386 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T03:50:08,386 DEBUG [RS:1;350fccc412b5:36433 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 350fccc412b5,36433,1731037806965 2024-11-08T03:50:08,386 DEBUG [RS:0;350fccc412b5:37985 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 350fccc412b5,37985,1731037806857 2024-11-08T03:50:08,386 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,36433,1731037806965' 2024-11-08T03:50:08,386 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,37985,1731037806857' 2024-11-08T03:50:08,386 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T03:50:08,387 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.ZKProcedureMemberRpcs(134): Checking for 
aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T03:50:08,387 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T03:50:08,387 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T03:50:08,387 DEBUG [RS:0;350fccc412b5:37985 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T03:50:08,387 DEBUG [RS:1;350fccc412b5:36433 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T03:50:08,387 INFO [RS:0;350fccc412b5:37985 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T03:50:08,388 INFO [RS:1;350fccc412b5:36433 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T03:50:08,388 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:08,388 INFO [RS:0;350fccc412b5:37985 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T03:50:08,388 INFO [RS:1;350fccc412b5:36433 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T03:50:08,388 INFO [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer(1482): Serving as 350fccc412b5,42565,1731037807016, RpcServer on 350fccc412b5/172.17.0.3:42565, sessionid=0x1011890b6a10003 2024-11-08T03:50:08,388 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T03:50:08,388 DEBUG [RS:2;350fccc412b5:42565 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 350fccc412b5,42565,1731037807016 2024-11-08T03:50:08,388 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,42565,1731037807016' 2024-11-08T03:50:08,388 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T03:50:08,389 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T03:50:08,389 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T03:50:08,389 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T03:50:08,389 DEBUG [RS:2;350fccc412b5:42565 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 350fccc412b5,42565,1731037807016 2024-11-08T03:50:08,389 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,42565,1731037807016' 2024-11-08T03:50:08,390 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T03:50:08,390 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T03:50:08,390 DEBUG [RS:2;350fccc412b5:42565 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 
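The flush-table-proc and online-snapshot entries show each region server registering itself as a procedure member and then checking the abort and acquired znodes under /hbase. A minimal sketch of inspecting those same znodes with the plain ZooKeeper client follows; the quorum address 127.0.0.1:56972 is taken from the ZKWatcher entries later in this log, and the paths are the ones quoted above.

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ProcedureZNodeInspector {
        public static void main(String[] args) throws Exception {
            CountDownLatch connected = new CountDownLatch(1);
            // Quorum address taken from this log; adjust for a real cluster.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:56972", 30_000, event -> {
                if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
                    connected.countDown();
                }
            });
            connected.await();
            // The same znodes the region servers report watching above.
            for (String path : new String[] {
                    "/hbase/flush-table-proc/acquired",
                    "/hbase/flush-table-proc/abort",
                    "/hbase/online-snapshot/acquired",
                    "/hbase/online-snapshot/abort" }) {
                List<String> children = zk.getChildren(path, false);
                System.out.println(path + " -> " + children);
            }
            zk.close();
        }
    }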
2024-11-08T03:50:08,391 INFO [RS:2;350fccc412b5:42565 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T03:50:08,391 INFO [RS:2;350fccc412b5:42565 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T03:50:08,468 WARN [350fccc412b5:45513 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-08T03:50:08,497 INFO [RS:0;350fccc412b5:37985 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T03:50:08,497 INFO [RS:1;350fccc412b5:36433 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T03:50:08,497 INFO [RS:2;350fccc412b5:42565 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-08T03:50:08,500 INFO [RS:1;350fccc412b5:36433 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=350fccc412b5%2C36433%2C1731037806965, suffix=, logDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,36433,1731037806965, archiveDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/oldWALs, maxLogs=32 2024-11-08T03:50:08,500 INFO [RS:0;350fccc412b5:37985 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=350fccc412b5%2C37985%2C1731037806857, suffix=, logDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,37985,1731037806857, archiveDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/oldWALs, maxLogs=32 2024-11-08T03:50:08,500 INFO [RS:2;350fccc412b5:42565 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=350fccc412b5%2C42565%2C1731037807016, suffix=, logDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,42565,1731037807016, archiveDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/oldWALs, maxLogs=32 2024-11-08T03:50:08,519 DEBUG [RS:0;350fccc412b5:37985 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,37985,1731037806857/350fccc412b5%2C37985%2C1731037806857.1731037808506, exclude list is [], retry=0 2024-11-08T03:50:08,519 DEBUG [RS:1;350fccc412b5:36433 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,36433,1731037806965/350fccc412b5%2C36433%2C1731037806965.1731037808506, exclude list is [], retry=0 2024-11-08T03:50:08,521 DEBUG [RS:2;350fccc412b5:42565 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,42565,1731037807016/350fccc412b5%2C42565%2C1731037807016.1731037808507, exclude list is [], retry=0 2024-11-08T03:50:08,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42839,DS-a5df7eb9-f4d2-4424-9980-dc0b0559d8f9,DISK] 2024-11-08T03:50:08,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client 
skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42839,DS-a5df7eb9-f4d2-4424-9980-dc0b0559d8f9,DISK] 2024-11-08T03:50:08,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33069,DS-c8f168e1-0407-45f4-af3d-da4b6037178e,DISK] 2024-11-08T03:50:08,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43295,DS-6a0d724f-868c-4cfd-899e-76beed9138ed,DISK] 2024-11-08T03:50:08,525 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43295,DS-6a0d724f-868c-4cfd-899e-76beed9138ed,DISK] 2024-11-08T03:50:08,545 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33069,DS-c8f168e1-0407-45f4-af3d-da4b6037178e,DISK] 2024-11-08T03:50:08,546 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33069,DS-c8f168e1-0407-45f4-af3d-da4b6037178e,DISK] 2024-11-08T03:50:08,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42839,DS-a5df7eb9-f4d2-4424-9980-dc0b0559d8f9,DISK] 2024-11-08T03:50:08,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43295,DS-6a0d724f-868c-4cfd-899e-76beed9138ed,DISK] 2024-11-08T03:50:08,551 INFO [RS:1;350fccc412b5:36433 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,36433,1731037806965/350fccc412b5%2C36433%2C1731037806965.1731037808506 2024-11-08T03:50:08,552 INFO [RS:0;350fccc412b5:37985 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,37985,1731037806857/350fccc412b5%2C37985%2C1731037806857.1731037808506 2024-11-08T03:50:08,553 DEBUG [RS:1;350fccc412b5:36433 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37607:37607),(127.0.0.1/127.0.0.1:33831:33831),(127.0.0.1/127.0.0.1:35489:35489)] 2024-11-08T03:50:08,553 DEBUG [RS:0;350fccc412b5:37985 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:37607:37607),(127.0.0.1/127.0.0.1:35489:35489),(127.0.0.1/127.0.0.1:33831:33831)] 2024-11-08T03:50:08,554 INFO [RS:2;350fccc412b5:42565 {}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,42565,1731037807016/350fccc412b5%2C42565%2C1731037807016.1731037808507 2024-11-08T03:50:08,554 DEBUG [RS:2;350fccc412b5:42565 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33831:33831),(127.0.0.1/127.0.0.1:35489:35489),(127.0.0.1/127.0.0.1:37607:37607)] 2024-11-08T03:50:08,722 DEBUG [350fccc412b5:45513 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-08T03:50:08,734 DEBUG [350fccc412b5:45513 {}] balancer.BalancerClusterState(204): Hosts are {350fccc412b5=0} racks are {/default-rack=0} 2024-11-08T03:50:08,740 DEBUG [350fccc412b5:45513 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T03:50:08,740 DEBUG [350fccc412b5:45513 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T03:50:08,740 DEBUG [350fccc412b5:45513 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-08T03:50:08,740 DEBUG [350fccc412b5:45513 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T03:50:08,740 DEBUG [350fccc412b5:45513 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T03:50:08,740 DEBUG [350fccc412b5:45513 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-08T03:50:08,740 INFO [350fccc412b5:45513 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T03:50:08,740 INFO [350fccc412b5:45513 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T03:50:08,741 INFO [350fccc412b5:45513 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-08T03:50:08,741 DEBUG [350fccc412b5:45513 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T03:50:08,747 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=350fccc412b5,37985,1731037806857 2024-11-08T03:50:08,754 INFO [PEWorker-2 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 350fccc412b5,37985,1731037806857, state=OPENING 2024-11-08T03:50:08,809 DEBUG [PEWorker-2 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T03:50:08,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:08,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:08,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:08,820 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:08,821 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:08,821 DEBUG 
[zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:08,821 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:08,821 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:08,822 DEBUG [PEWorker-2 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T03:50:08,824 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=350fccc412b5,37985,1731037806857}] 2024-11-08T03:50:09,001 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T03:50:09,004 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56763, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T03:50:09,015 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T03:50:09,015 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-08T03:50:09,016 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-08T03:50:09,019 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=350fccc412b5%2C37985%2C1731037806857.meta, suffix=.meta, logDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,37985,1731037806857, archiveDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/oldWALs, maxLogs=32 2024-11-08T03:50:09,034 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(525): When create output stream for /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,37985,1731037806857/350fccc412b5%2C37985%2C1731037806857.meta.1731037809021.meta, exclude list is [], retry=0 2024-11-08T03:50:09,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42839,DS-a5df7eb9-f4d2-4424-9980-dc0b0559d8f9,DISK] 2024-11-08T03:50:09,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43295,DS-6a0d724f-868c-4cfd-899e-76beed9138ed,DISK] 2024-11-08T03:50:09,039 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] 
asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33069,DS-c8f168e1-0407-45f4-af3d-da4b6037178e,DISK] 2024-11-08T03:50:09,042 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/WALs/350fccc412b5,37985,1731037806857/350fccc412b5%2C37985%2C1731037806857.meta.1731037809021.meta 2024-11-08T03:50:09,042 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33831:33831),(127.0.0.1/127.0.0.1:37607:37607),(127.0.0.1/127.0.0.1:35489:35489)] 2024-11-08T03:50:09,043 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T03:50:09,044 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T03:50:09,046 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T03:50:09,051 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-08T03:50:09,054 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T03:50:09,055 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:09,055 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T03:50:09,055 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T03:50:09,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T03:50:09,059 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T03:50:09,060 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:09,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:09,061 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T03:50:09,062 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T03:50:09,062 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:09,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:09,063 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T03:50:09,065 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T03:50:09,065 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:09,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:09,066 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T03:50:09,067 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T03:50:09,067 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:09,068 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-08T03:50:09,069 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T03:50:09,070 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740 2024-11-08T03:50:09,073 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740 2024-11-08T03:50:09,075 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T03:50:09,075 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T03:50:09,076 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T03:50:09,079 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T03:50:09,080 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59808218, jitterRate=-0.10878810286521912}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T03:50:09,080 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T03:50:09,082 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731037809055Writing region info on filesystem at 1731037809056 (+1 ms)Initializing all the Stores at 1731037809057 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037809057Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037809058 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037809058Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037809058Cleaning up temporary data from old regions at 1731037809075 (+17 ms)Running coprocessor post-open hooks at 1731037809080 (+5 ms)Region opened successfully at 1731037809082 (+2 ms) 2024-11-08T03:50:09,088 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731037808990 2024-11-08T03:50:09,099 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T03:50:09,100 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T03:50:09,101 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=350fccc412b5,37985,1731037806857 2024-11-08T03:50:09,104 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 350fccc412b5,37985,1731037806857, state=OPEN 2024-11-08T03:50:09,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T03:50:09,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T03:50:09,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T03:50:09,114 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T03:50:09,115 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:09,115 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:09,115 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:09,115 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:09,115 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=350fccc412b5,37985,1731037806857 2024-11-08T03:50:09,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T03:50:09,121 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=350fccc412b5,37985,1731037806857 in 292 msec 2024-11-08T03:50:09,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T03:50:09,128 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 820 msec 2024-11-08T03:50:09,130 DEBUG [PEWorker-3 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T03:50:09,130 INFO [PEWorker-3 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T03:50:09,152 DEBUG [PEWorker-3 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T03:50:09,153 DEBUG [PEWorker-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=350fccc412b5,37985,1731037806857, seqNum=-1] 2024-11-08T03:50:09,170 DEBUG [PEWorker-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T03:50:09,172 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51337, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T03:50:09,195 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1200 sec 2024-11-08T03:50:09,195 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731037809195, completionTime=-1 2024-11-08T03:50:09,216 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-08T03:50:09,216 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 
2024-11-08T03:50:09,245 INFO [master/350fccc412b5:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-08T03:50:09,245 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731037869245 2024-11-08T03:50:09,245 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731037929245 2024-11-08T03:50:09,245 INFO [master/350fccc412b5:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 28 msec 2024-11-08T03:50:09,247 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-08T03:50:09,257 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,45513,1731037806126-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:09,258 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,45513,1731037806126-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:09,258 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,45513,1731037806126-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:09,259 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-350fccc412b5:45513, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:09,260 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:09,260 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:09,266 DEBUG [master/350fccc412b5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T03:50:09,286 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.159sec 2024-11-08T03:50:09,288 INFO [master/350fccc412b5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T03:50:09,289 INFO [master/350fccc412b5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T03:50:09,290 INFO [master/350fccc412b5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T03:50:09,290 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-08T03:50:09,290 INFO [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T03:50:09,291 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,45513,1731037806126-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T03:50:09,291 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,45513,1731037806126-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T03:50:09,296 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T03:50:09,297 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T03:50:09,297 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,45513,1731037806126-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:09,370 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11ea7dba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T03:50:09,375 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-08T03:50:09,375 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-08T03:50:09,378 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 350fccc412b5,45513,-1 for getting cluster id 2024-11-08T03:50:09,381 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T03:50:09,389 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '30486984-cf3e-4b43-9c05-189e37fbcd03' 2024-11-08T03:50:09,391 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T03:50:09,392 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "30486984-cf3e-4b43-9c05-189e37fbcd03" 2024-11-08T03:50:09,392 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@53d7b575, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T03:50:09,392 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [350fccc412b5,45513,-1] 2024-11-08T03:50:09,395 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T03:50:09,397 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:09,398 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36684, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
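The entries above trace the test client bootstrapping a connection: it asks the connection registry for the cluster id, resolves the meta region location and the active master, and opens ClientMetaService and MasterService stubs. A hedged sketch of the usual public-API entry point for such a connection, assuming the ZooKeeper quorum shown in this log is still reachable (this is not the test's own code), is:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Quorum and port taken from the ZKWatcher entries in this log (assumption: still up).
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "56972");
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                // Roughly mirrors the registry lookups above: cluster id and meta availability.
                System.out.println("Cluster id: " + admin.getClusterMetrics().getClusterId());
                System.out.println("meta online: " + admin.tableExists(TableName.META_TABLE_NAME));
            }
        }
    }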
2024-11-08T03:50:09,401 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2866f9eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T03:50:09,401 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T03:50:09,407 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=350fccc412b5,37985,1731037806857, seqNum=-1] 2024-11-08T03:50:09,407 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T03:50:09,409 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:48822, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T03:50:09,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=350fccc412b5,45513,1731037806126 2024-11-08T03:50:09,434 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T03:50:09,438 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 350fccc412b5,45513,1731037806126 2024-11-08T03:50:09,440 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@118b3aa5 2024-11-08T03:50:09,441 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T03:50:09,443 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36694, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-08T03:50:09,449 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T03:50:09,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-08T03:50:09,461 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T03:50:09,463 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-08T03:50:09,463 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:09,466 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T03:50:09,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T03:50:09,474 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:09,475 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:09,478 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:49042 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:43295:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49042 dst: /127.0.0.1:43295 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:09,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-08T03:50:09,484 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T03:50:09,486 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3a40058fbae3361f5aec1138d178c54a, NAME => 'TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407 2024-11-08T03:50:09,492 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:09,492 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:09,496 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:58188 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:42839:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58188 dst: /127.0.0.1:42839 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:09,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-08T03:50:09,502 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
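The DFSStripedOutputStream warnings above are a capacity problem rather than corruption in flight: the RS-3-2-1024k policy stripes each block group into 3 data and 2 parity blocks, so a full placement needs at least five datanodes, while only three datanodes (127.0.0.1:42839, :33069, :43295) appear in this mini cluster. That is why the two parity blocks (indexes 3 and 4) cannot be allocated and each block group is reported as having "failed to write 2 blocks". The log's own suggestion is to run 'hdfs ec -verifyClusterSetup'; a rough programmatic equivalent, assuming the NameNode at hdfs://localhost:41665 shown in the WAL configuration entries, is sketched here:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    public class EcCapacityCheck {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // NameNode URI and test-data path taken from this log; both are assumptions outside it.
            try (DistributedFileSystem dfs = (DistributedFileSystem)
                    FileSystem.get(URI.create("hdfs://localhost:41665"), conf)) {
                Path walDir = new Path("/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407");
                ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(walDir);
                DatanodeInfo[] live = dfs.getDataNodeStats();

                if (policy == null) {
                    System.out.println(walDir + " is replicated, not erasure coded");
                    return;
                }
                int required = policy.getNumDataUnits() + policy.getNumParityUnits();
                System.out.printf("policy=%s needs %d datanodes, cluster has %d live%n",
                    policy.getName(), required, live.length);
                if (live.length < required) {
                    // The condition behind the "Cannot allocate parity block" warnings above.
                    System.out.println("Not enough datanodes for a full EC block group placement");
                }
            }
        }
    }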
2024-11-08T03:50:09,503 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:09,503 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 3a40058fbae3361f5aec1138d178c54a, disabling compactions & flushes 2024-11-08T03:50:09,503 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:09,503 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:09,503 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. after waiting 0 ms 2024-11-08T03:50:09,503 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:09,503 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:09,503 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3a40058fbae3361f5aec1138d178c54a: Waiting for close lock at 1731037809503Disabling compacts and flushes for region at 1731037809503Disabling writes for close at 1731037809503Writing region close event to WAL at 1731037809503Closed at 1731037809503 2024-11-08T03:50:09,506 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T03:50:09,510 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731037809506"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731037809506"}]},"ts":"1731037809506"} 2024-11-08T03:50:09,515 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-08T03:50:09,517 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T03:50:09,520 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731037809518"}]},"ts":"1731037809518"} 2024-11-08T03:50:09,526 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-08T03:50:09,527 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {350fccc412b5=0} racks are {/default-rack=0} 2024-11-08T03:50:09,528 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T03:50:09,528 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T03:50:09,528 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-08T03:50:09,528 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T03:50:09,528 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T03:50:09,528 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-08T03:50:09,528 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T03:50:09,529 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T03:50:09,529 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-08T03:50:09,529 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T03:50:09,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3a40058fbae3361f5aec1138d178c54a, ASSIGN}] 2024-11-08T03:50:09,533 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3a40058fbae3361f5aec1138d178c54a, ASSIGN 2024-11-08T03:50:09,535 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3a40058fbae3361f5aec1138d178c54a, ASSIGN; state=OFFLINE, location=350fccc412b5,36433,1731037806965; forceNewPlan=false, retain=false 2024-11-08T03:50:09,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T03:50:09,689 INFO [350fccc412b5:45513 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
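[Editor's note] The ASSIGN subprocedure initialized above (pid=5) is what the test waits on before writing data; the later "Waiting until all regions of table TestHBaseWalOnEC get assigned" lines come from a wait helper in HBaseTestingUtil. A minimal sketch of that wait, assuming a started utility instance named UTIL (the field name and exact helper are assumptions consistent with the log message, not quoted from the test source):

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.TableName;

public class WaitForAssignmentSketch {
  // Blocks until every region of the table is assigned and reflected in hbase:meta,
  // matching the "Waiting until all regions ... get assigned. Timeout = 60000ms" record below.
  static void waitForTable(HBaseTestingUtil util) throws Exception {
    util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));
  }
}
```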
2024-11-08T03:50:09,690 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3a40058fbae3361f5aec1138d178c54a, regionState=OPENING, regionLocation=350fccc412b5,36433,1731037806965 2024-11-08T03:50:09,696 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3a40058fbae3361f5aec1138d178c54a, ASSIGN because future has completed 2024-11-08T03:50:09,697 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3a40058fbae3361f5aec1138d178c54a, server=350fccc412b5,36433,1731037806965}] 2024-11-08T03:50:09,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T03:50:09,852 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T03:50:09,854 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60309, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T03:50:09,860 INFO [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:09,861 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 3a40058fbae3361f5aec1138d178c54a, NAME => 'TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a.', STARTKEY => '', ENDKEY => ''} 2024-11-08T03:50:09,861 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:09,861 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:09,861 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:09,861 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:09,864 INFO [StoreOpener-3a40058fbae3361f5aec1138d178c54a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:09,866 INFO [StoreOpener-3a40058fbae3361f5aec1138d178c54a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3a40058fbae3361f5aec1138d178c54a columnFamilyName cf 2024-11-08T03:50:09,866 DEBUG [StoreOpener-3a40058fbae3361f5aec1138d178c54a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:09,867 INFO [StoreOpener-3a40058fbae3361f5aec1138d178c54a-1 {}] regionserver.HStore(327): Store=3a40058fbae3361f5aec1138d178c54a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T03:50:09,868 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:09,869 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/default/TestHBaseWalOnEC/3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:09,869 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/default/TestHBaseWalOnEC/3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:09,870 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:09,870 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:09,872 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:09,878 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/default/TestHBaseWalOnEC/3a40058fbae3361f5aec1138d178c54a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T03:50:09,879 INFO [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 3a40058fbae3361f5aec1138d178c54a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73083274, jitterRate=0.08902564644813538}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T03:50:09,879 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:09,880 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 3a40058fbae3361f5aec1138d178c54a: Running coprocessor pre-open hook at 1731037809862Writing region info on filesystem at 1731037809862Initializing all the Stores at 1731037809863 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037809864 (+1 ms)Cleaning up temporary data from old regions at 1731037809870 (+6 ms)Running coprocessor post-open hooks at 1731037809879 (+9 ms)Region opened successfully at 1731037809880 (+1 ms) 2024-11-08T03:50:09,882 INFO [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a., pid=6, masterSystemTime=1731037809852 2024-11-08T03:50:09,885 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:09,886 INFO [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:09,887 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=3a40058fbae3361f5aec1138d178c54a, regionState=OPEN, openSeqNum=2, regionLocation=350fccc412b5,36433,1731037806965 2024-11-08T03:50:09,891 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3a40058fbae3361f5aec1138d178c54a, server=350fccc412b5,36433,1731037806965 because future has completed 2024-11-08T03:50:09,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-08T03:50:09,896 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 3a40058fbae3361f5aec1138d178c54a, server=350fccc412b5,36433,1731037806965 in 196 msec 2024-11-08T03:50:09,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-08T03:50:09,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=3a40058fbae3361f5aec1138d178c54a, ASSIGN in 366 msec 2024-11-08T03:50:09,901 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T03:50:09,901 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731037809901"}]},"ts":"1731037809901"} 2024-11-08T03:50:09,904 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-08T03:50:09,905 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T03:50:09,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 453 msec 2024-11-08T03:50:10,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T03:50:10,099 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-08T03:50:10,099 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-08T03:50:10,103 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T03:50:10,113 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-08T03:50:10,114 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T03:50:10,115 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-08T03:50:10,123 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a., hostname=350fccc412b5,36433,1731037806965, seqNum=2] 2024-11-08T03:50:10,125 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T03:50:10,127 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47898, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T03:50:10,136 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-08T03:50:10,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-08T03:50:10,144 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-08T03:50:10,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T03:50:10,147 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-08T03:50:10,149 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-08T03:50:10,256 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T03:50:10,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36433 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-08T03:50:10,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:10,322 INFO [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 3a40058fbae3361f5aec1138d178c54a 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-08T03:50:10,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/default/TestHBaseWalOnEC/3a40058fbae3361f5aec1138d178c54a/.tmp/cf/3c4671c4e42647d5b8051db3ee9d53c7 is 36, key is row/cf:cq/1731037810128/Put/seqid=0 2024-11-08T03:50:10,379 WARN [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:10,379 WARN [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:10,383 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_779414877_22 at /127.0.0.1:41520 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:33069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41520 dst: /127.0.0.1:33069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:10,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-08T03:50:10,388 WARN [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T03:50:10,388 INFO [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/default/TestHBaseWalOnEC/3a40058fbae3361f5aec1138d178c54a/.tmp/cf/3c4671c4e42647d5b8051db3ee9d53c7 2024-11-08T03:50:10,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/default/TestHBaseWalOnEC/3a40058fbae3361f5aec1138d178c54a/.tmp/cf/3c4671c4e42647d5b8051db3ee9d53c7 as hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/default/TestHBaseWalOnEC/3a40058fbae3361f5aec1138d178c54a/cf/3c4671c4e42647d5b8051db3ee9d53c7 2024-11-08T03:50:10,441 INFO [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/default/TestHBaseWalOnEC/3a40058fbae3361f5aec1138d178c54a/cf/3c4671c4e42647d5b8051db3ee9d53c7, entries=1, sequenceid=5, filesize=4.7 K 2024-11-08T03:50:10,450 INFO [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 3a40058fbae3361f5aec1138d178c54a in 126ms, sequenceid=5, compaction requested=false 2024-11-08T03:50:10,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-08T03:50:10,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 3a40058fbae3361f5aec1138d178c54a: 2024-11-08T03:50:10,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 
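[Editor's note] The flush above persists a single cell (HFile key row/cf:cq, ~32 B of memstore data) into the cf store file and commits it from .tmp into the column family directory. The client-side sequence that produces this is a Put followed by an admin-requested flush, which the master services through the FlushTableProcedure (pid=7) seen earlier. A minimal sketch, with the same hypothetical `conn`; the cell value bytes are a stand-in, since the log records only the key and sizes.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  // Writes the row/cf:cq cell seen in the HFile key above, then asks the master
  // to flush the table, which fans a FlushRegionProcedure out to the region server
  // hosting 3a40058fbae3361f5aec1138d178c54a.
  static void putAndFlush(Connection conn) throws java.io.IOException {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(name)) {
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
    }
    try (Admin admin = conn.getAdmin()) {
      admin.flush(name); // backed by FlushTableProcedure / FlushRegionProcedure on the cluster
    }
  }
}
```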
2024-11-08T03:50:10,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-08T03:50:10,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-08T03:50:10,463 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-08T03:50:10,463 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 310 msec 2024-11-08T03:50:10,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T03:50:10,466 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 326 msec 2024-11-08T03:50:10,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45513 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T03:50:10,775 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-08T03:50:10,789 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T03:50:10,789 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T03:50:10,789 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T03:50:10,794 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:10,794 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:10,794 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
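[Editor's note] The call stack above shows the teardown path: TestHBaseWalOnEC.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which closes the shared async connection and then stops the mini HBase cluster (and, further below, the mini DFS cluster and ZooKeeper). A minimal sketch of that teardown shape follows; the static field, constructor use, and the JUnit 4 @AfterClass annotation are assumptions for illustration, since the real test's setup code is not visible in this log.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class TearDownSketch {
  // Shared testing utility; in a real test this would be created and started
  // (startMiniCluster) in the corresponding setup method.
  static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDown() throws Exception {
    // Mirrors the stack trace above: close the cluster connection, then shut
    // down the mini HBase cluster and its backing services.
    UTIL.shutdownMiniCluster();
  }
}
```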
2024-11-08T03:50:10,795 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T03:50:10,795 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1302508905, stopped=false 2024-11-08T03:50:10,795 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=350fccc412b5,45513,1731037806126 2024-11-08T03:50:10,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:10,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:10,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:10,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:10,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:10,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:10,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:10,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:10,915 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T03:50:10,917 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-08T03:50:10,917 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:10,917 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:10,917 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:10,917 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at 
org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T03:50:10,917 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:10,918 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:10,919 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '350fccc412b5,37985,1731037806857' ***** 2024-11-08T03:50:10,919 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T03:50:10,919 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '350fccc412b5,36433,1731037806965' ***** 2024-11-08T03:50:10,920 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T03:50:10,920 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '350fccc412b5,42565,1731037807016' ***** 2024-11-08T03:50:10,920 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T03:50:10,920 INFO [RS:1;350fccc412b5:36433 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T03:50:10,920 INFO [RS:2;350fccc412b5:42565 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T03:50:10,920 INFO [RS:0;350fccc412b5:37985 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T03:50:10,921 INFO [RS:2;350fccc412b5:42565 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T03:50:10,921 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T03:50:10,921 INFO [RS:0;350fccc412b5:37985 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T03:50:10,921 INFO [RS:1;350fccc412b5:36433 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T03:50:10,921 INFO [RS:2;350fccc412b5:42565 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T03:50:10,921 INFO [RS:1;350fccc412b5:36433 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T03:50:10,921 INFO [RS:0;350fccc412b5:37985 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-08T03:50:10,921 INFO [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer(959): stopping server 350fccc412b5,42565,1731037807016 2024-11-08T03:50:10,921 INFO [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(959): stopping server 350fccc412b5,37985,1731037806857 2024-11-08T03:50:10,921 INFO [RS:2;350fccc412b5:42565 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T03:50:10,921 INFO [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(3091): Received CLOSE for 3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:10,921 INFO [RS:0;350fccc412b5:37985 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T03:50:10,921 INFO [RS:2;350fccc412b5:42565 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;350fccc412b5:42565. 2024-11-08T03:50:10,921 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T03:50:10,921 INFO [RS:0;350fccc412b5:37985 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;350fccc412b5:37985. 2024-11-08T03:50:10,921 DEBUG [RS:2;350fccc412b5:42565 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T03:50:10,921 DEBUG [RS:2;350fccc412b5:42565 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:10,921 DEBUG [RS:0;350fccc412b5:37985 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at 
java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T03:50:10,921 DEBUG [RS:0;350fccc412b5:37985 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:10,921 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T03:50:10,922 INFO [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer(976): stopping server 350fccc412b5,42565,1731037807016; all regions closed. 2024-11-08T03:50:10,922 INFO [RS:0;350fccc412b5:37985 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T03:50:10,922 INFO [RS:0;350fccc412b5:37985 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T03:50:10,922 INFO [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(959): stopping server 350fccc412b5,36433,1731037806965 2024-11-08T03:50:10,922 INFO [RS:0;350fccc412b5:37985 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T03:50:10,922 INFO [RS:1;350fccc412b5:36433 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T03:50:10,922 INFO [RS:1;350fccc412b5:36433 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;350fccc412b5:36433. 2024-11-08T03:50:10,922 INFO [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T03:50:10,922 DEBUG [RS:1;350fccc412b5:36433 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T03:50:10,922 DEBUG [RS:1;350fccc412b5:36433 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:10,923 INFO [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-08T03:50:10,923 INFO [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(1321): Waiting on 
1 regions to close 2024-11-08T03:50:10,923 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3a40058fbae3361f5aec1138d178c54a, disabling compactions & flushes 2024-11-08T03:50:10,923 DEBUG [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(1325): Online Regions={3a40058fbae3361f5aec1138d178c54a=TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a.} 2024-11-08T03:50:10,923 DEBUG [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-08T03:50:10,923 INFO [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:10,923 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T03:50:10,923 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:10,923 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T03:50:10,923 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T03:50:10,923 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. after waiting 0 ms 2024-11-08T03:50:10,923 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T03:50:10,923 DEBUG [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(1351): Waiting on 3a40058fbae3361f5aec1138d178c54a 2024-11-08T03:50:10,923 DEBUG [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-08T03:50:10,923 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 
2024-11-08T03:50:10,923 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T03:50:10,923 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-08T03:50:10,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741828_1018 (size=93) 2024-11-08T03:50:10,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_1073741828_1018 (size=93) 2024-11-08T03:50:10,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_1073741828_1018 (size=93) 2024-11-08T03:50:10,936 DEBUG [RS:2;350fccc412b5:42565 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/oldWALs 2024-11-08T03:50:10,936 INFO [RS:2;350fccc412b5:42565 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 350fccc412b5%2C42565%2C1731037807016:(num 1731037808507) 2024-11-08T03:50:10,936 DEBUG [RS:2;350fccc412b5:42565 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:10,936 INFO [RS:2;350fccc412b5:42565 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:10,936 INFO [RS:2;350fccc412b5:42565 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T03:50:10,937 INFO [RS:2;350fccc412b5:42565 {}] hbase.ChoreService(370): Chore service for: regionserver/350fccc412b5:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T03:50:10,937 INFO [RS:2;350fccc412b5:42565 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T03:50:10,937 INFO [regionserver/350fccc412b5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T03:50:10,937 INFO [RS:2;350fccc412b5:42565 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T03:50:10,937 INFO [RS:2;350fccc412b5:42565 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-08T03:50:10,937 INFO [RS:2;350fccc412b5:42565 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T03:50:10,937 INFO [RS:2;350fccc412b5:42565 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42565 2024-11-08T03:50:10,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/350fccc412b5,42565,1731037807016 2024-11-08T03:50:10,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T03:50:10,946 INFO [RS:2;350fccc412b5:42565 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T03:50:10,947 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [350fccc412b5,42565,1731037807016] 2024-11-08T03:50:10,950 INFO [regionserver/350fccc412b5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:10,950 INFO [regionserver/350fccc412b5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:10,951 INFO [regionserver/350fccc412b5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:10,954 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/default/TestHBaseWalOnEC/3a40058fbae3361f5aec1138d178c54a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-08T03:50:10,956 INFO [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:10,956 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3a40058fbae3361f5aec1138d178c54a: Waiting for close lock at 1731037810922Running coprocessor pre-close hooks at 1731037810923 (+1 ms)Disabling compacts and flushes for region at 1731037810923Disabling writes for close at 1731037810923Writing region close event to WAL at 1731037810929 (+6 ms)Running coprocessor post-close hooks at 1731037810955 (+26 ms)Closed at 1731037810956 (+1 ms) 2024-11-08T03:50:10,957 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a. 2024-11-08T03:50:10,958 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/.tmp/info/d7c1de8b6a104f0bbe1f3f249d48493d is 153, key is TestHBaseWalOnEC,,1731037809445.3a40058fbae3361f5aec1138d178c54a./info:regioninfo/1731037809887/Put/seqid=0 2024-11-08T03:50:10,962 WARN [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:10,962 WARN [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:10,966 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2147315106_22 at /127.0.0.1:41546 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:33069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41546 dst: /127.0.0.1:33069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:10,967 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/350fccc412b5,42565,1731037807016 already deleted, retry=false 2024-11-08T03:50:10,967 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 350fccc412b5,42565,1731037807016 expired; onlineServers=2 2024-11-08T03:50:10,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-08T03:50:10,971 WARN [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T03:50:10,971 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/.tmp/info/d7c1de8b6a104f0bbe1f3f249d48493d 2024-11-08T03:50:10,996 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/.tmp/ns/1df496e64ba34d68b827857f29eee50d is 43, key is default/ns:d/1731037809176/Put/seqid=0 2024-11-08T03:50:10,999 WARN [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:10,999 WARN [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:11,004 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2147315106_22 at /127.0.0.1:41574 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:33069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41574 dst: /127.0.0.1:33069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:11,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-08T03:50:11,009 WARN [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T03:50:11,009 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/.tmp/ns/1df496e64ba34d68b827857f29eee50d 2024-11-08T03:50:11,036 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/.tmp/table/29dba73d11f042469cf1d5bc6ea1a08f is 52, key is TestHBaseWalOnEC/table:state/1731037809901/Put/seqid=0 2024-11-08T03:50:11,038 WARN [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:11,038 WARN [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:11,041 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2147315106_22 at /127.0.0.1:49068 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:43295:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49068 dst: /127.0.0.1:43295 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:11,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-08T03:50:11,046 WARN [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. 
It's at high risk of losing data. 2024-11-08T03:50:11,046 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/.tmp/table/29dba73d11f042469cf1d5bc6ea1a08f 2024-11-08T03:50:11,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:11,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42565-0x1011890b6a10003, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:11,057 INFO [RS:2;350fccc412b5:42565 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T03:50:11,058 INFO [RS:2;350fccc412b5:42565 {}] regionserver.HRegionServer(1031): Exiting; stopping=350fccc412b5,42565,1731037807016; zookeeper connection closed. 2024-11-08T03:50:11,058 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/.tmp/info/d7c1de8b6a104f0bbe1f3f249d48493d as hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/info/d7c1de8b6a104f0bbe1f3f249d48493d 2024-11-08T03:50:11,058 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5ba7dff5 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5ba7dff5 2024-11-08T03:50:11,068 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/info/d7c1de8b6a104f0bbe1f3f249d48493d, entries=10, sequenceid=11, filesize=6.5 K 2024-11-08T03:50:11,071 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/.tmp/ns/1df496e64ba34d68b827857f29eee50d as hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/ns/1df496e64ba34d68b827857f29eee50d 2024-11-08T03:50:11,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-08T03:50:11,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-08T03:50:11,083 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/ns/1df496e64ba34d68b827857f29eee50d, entries=2, sequenceid=11, filesize=5.0 K 2024-11-08T03:50:11,086 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/.tmp/table/29dba73d11f042469cf1d5bc6ea1a08f as hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/table/29dba73d11f042469cf1d5bc6ea1a08f 2024-11-08T03:50:11,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-08T03:50:11,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-08T03:50:11,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-08T03:50:11,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-08T03:50:11,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-08T03:50:11,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775709_1013 (size=1321) 2024-11-08T03:50:11,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_-9223372036854775708_1013 (size=1321) 2024-11-08T03:50:11,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-08T03:50:11,097 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/table/29dba73d11f042469cf1d5bc6ea1a08f, entries=2, sequenceid=11, filesize=5.1 K 2024-11-08T03:50:11,099 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 175ms, sequenceid=11, compaction requested=false 2024-11-08T03:50:11,099 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-08T03:50:11,110 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-08T03:50:11,111 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T03:50:11,111 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T03:50:11,111 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731037810923Running coprocessor pre-close hooks at 
1731037810923Disabling compacts and flushes for region at 1731037810923Disabling writes for close at 1731037810923Obtaining lock to block concurrent updates at 1731037810923Preparing flush snapshotting stores in 1588230740 at 1731037810923Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731037810924 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731037810926 (+2 ms)Flushing 1588230740/info: creating writer at 1731037810926Flushing 1588230740/info: appending metadata at 1731037810953 (+27 ms)Flushing 1588230740/info: closing flushed file at 1731037810953Flushing 1588230740/ns: creating writer at 1731037810980 (+27 ms)Flushing 1588230740/ns: appending metadata at 1731037810995 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731037810995Flushing 1588230740/table: creating writer at 1731037811018 (+23 ms)Flushing 1588230740/table: appending metadata at 1731037811035 (+17 ms)Flushing 1588230740/table: closing flushed file at 1731037811035Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10f9531c: reopening flushed file at 1731037811056 (+21 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56dd9e44: reopening flushed file at 1731037811069 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4320be6b: reopening flushed file at 1731037811084 (+15 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 175ms, sequenceid=11, compaction requested=false at 1731037811099 (+15 ms)Writing region close event to WAL at 1731037811100 (+1 ms)Running coprocessor post-close hooks at 1731037811110 (+10 ms)Closed at 1731037811111 (+1 ms) 2024-11-08T03:50:11,111 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T03:50:11,123 INFO [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(976): stopping server 350fccc412b5,36433,1731037806965; all regions closed. 2024-11-08T03:50:11,123 INFO [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(976): stopping server 350fccc412b5,37985,1731037806857; all regions closed. 
2024-11-08T03:50:11,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741829_1019 (size=2751) 2024-11-08T03:50:11,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741826_1016 (size=1298) 2024-11-08T03:50:11,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_1073741826_1016 (size=1298) 2024-11-08T03:50:11,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_1073741829_1019 (size=2751) 2024-11-08T03:50:11,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_1073741829_1019 (size=2751) 2024-11-08T03:50:11,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_1073741826_1016 (size=1298) 2024-11-08T03:50:11,133 DEBUG [RS:1;350fccc412b5:36433 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/oldWALs 2024-11-08T03:50:11,133 INFO [RS:1;350fccc412b5:36433 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 350fccc412b5%2C36433%2C1731037806965:(num 1731037808506) 2024-11-08T03:50:11,133 DEBUG [RS:1;350fccc412b5:36433 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:11,133 INFO [RS:1;350fccc412b5:36433 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:11,133 INFO [RS:1;350fccc412b5:36433 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T03:50:11,133 DEBUG [RS:0;350fccc412b5:37985 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/oldWALs 2024-11-08T03:50:11,133 INFO [RS:0;350fccc412b5:37985 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 350fccc412b5%2C37985%2C1731037806857.meta:.meta(num 1731037809021) 2024-11-08T03:50:11,133 INFO [RS:1;350fccc412b5:36433 {}] hbase.ChoreService(370): Chore service for: regionserver/350fccc412b5:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T03:50:11,134 INFO [RS:1;350fccc412b5:36433 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T03:50:11,134 INFO [regionserver/350fccc412b5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T03:50:11,134 INFO [RS:1;350fccc412b5:36433 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T03:50:11,134 INFO [RS:1;350fccc412b5:36433 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-08T03:50:11,134 INFO [RS:1;350fccc412b5:36433 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T03:50:11,134 INFO [RS:1;350fccc412b5:36433 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:36433 2024-11-08T03:50:11,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_1073741827_1017 (size=93) 2024-11-08T03:50:11,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_1073741827_1017 (size=93) 2024-11-08T03:50:11,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741827_1017 (size=93) 2024-11-08T03:50:11,140 DEBUG [RS:0;350fccc412b5:37985 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/oldWALs 2024-11-08T03:50:11,140 INFO [RS:0;350fccc412b5:37985 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 350fccc412b5%2C37985%2C1731037806857:(num 1731037808506) 2024-11-08T03:50:11,140 DEBUG [RS:0;350fccc412b5:37985 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:11,140 INFO [RS:0;350fccc412b5:37985 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:11,140 INFO [RS:0;350fccc412b5:37985 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T03:50:11,140 INFO [RS:0;350fccc412b5:37985 {}] hbase.ChoreService(370): Chore service for: regionserver/350fccc412b5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T03:50:11,140 INFO [RS:0;350fccc412b5:37985 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T03:50:11,140 INFO [regionserver/350fccc412b5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T03:50:11,141 INFO [RS:0;350fccc412b5:37985 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37985 2024-11-08T03:50:11,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/350fccc412b5,36433,1731037806965 2024-11-08T03:50:11,146 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T03:50:11,146 INFO [RS:1;350fccc412b5:36433 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T03:50:11,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/350fccc412b5,37985,1731037806857 2024-11-08T03:50:11,156 INFO [RS:0;350fccc412b5:37985 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T03:50:11,167 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [350fccc412b5,36433,1731037806965] 2024-11-08T03:50:11,251 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/350fccc412b5,36433,1731037806965 already deleted, retry=false 2024-11-08T03:50:11,251 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 350fccc412b5,36433,1731037806965 expired; onlineServers=1 2024-11-08T03:50:11,251 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [350fccc412b5,37985,1731037806857] 2024-11-08T03:50:11,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:11,267 INFO [RS:1;350fccc412b5:36433 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T03:50:11,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36433-0x1011890b6a10002, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:11,267 INFO [RS:1;350fccc412b5:36433 {}] regionserver.HRegionServer(1031): Exiting; stopping=350fccc412b5,36433,1731037806965; zookeeper connection closed. 
2024-11-08T03:50:11,268 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5649cd60 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5649cd60 2024-11-08T03:50:11,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:11,278 INFO [RS:0;350fccc412b5:37985 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T03:50:11,278 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37985-0x1011890b6a10001, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:11,278 INFO [RS:0;350fccc412b5:37985 {}] regionserver.HRegionServer(1031): Exiting; stopping=350fccc412b5,37985,1731037806857; zookeeper connection closed. 2024-11-08T03:50:11,278 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@38bd026e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@38bd026e 2024-11-08T03:50:11,279 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-08T03:50:11,387 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/350fccc412b5,37985,1731037806857 already deleted, retry=false 2024-11-08T03:50:11,388 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 350fccc412b5,37985,1731037806857 expired; onlineServers=0 2024-11-08T03:50:11,388 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '350fccc412b5,45513,1731037806126' ***** 2024-11-08T03:50:11,388 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T03:50:11,389 INFO [M:0;350fccc412b5:45513 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T03:50:11,389 INFO [M:0;350fccc412b5:45513 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T03:50:11,390 DEBUG [M:0;350fccc412b5:45513 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T03:50:11,390 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-08T03:50:11,390 DEBUG [M:0;350fccc412b5:45513 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T03:50:11,390 DEBUG [master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.large.0-1731037808175 {}] cleaner.HFileCleaner(306): Exit Thread[master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.large.0-1731037808175,5,FailOnTimeoutGroup] 2024-11-08T03:50:11,390 DEBUG [master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.small.0-1731037808176 {}] cleaner.HFileCleaner(306): Exit Thread[master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.small.0-1731037808176,5,FailOnTimeoutGroup] 2024-11-08T03:50:11,391 INFO [M:0;350fccc412b5:45513 {}] hbase.ChoreService(370): Chore service for: master/350fccc412b5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T03:50:11,392 INFO [M:0;350fccc412b5:45513 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T03:50:11,392 DEBUG [M:0;350fccc412b5:45513 {}] master.HMaster(1795): Stopping service threads 2024-11-08T03:50:11,393 INFO [M:0;350fccc412b5:45513 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T03:50:11,393 INFO [M:0;350fccc412b5:45513 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T03:50:11,394 INFO [M:0;350fccc412b5:45513 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T03:50:11,395 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-08T03:50:11,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T03:50:11,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:11,420 DEBUG [M:0;350fccc412b5:45513 {}] zookeeper.ZKUtil(347): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-08T03:50:11,421 WARN [M:0;350fccc412b5:45513 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-08T03:50:11,423 INFO [M:0;350fccc412b5:45513 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/.lastflushedseqids 2024-11-08T03:50:11,439 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:11,439 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-08T03:50:11,442 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:41624 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:33069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41624 dst: /127.0.0.1:33069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:11,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-08T03:50:11,447 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-08T03:50:11,447 INFO [M:0;350fccc412b5:45513 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T03:50:11,447 INFO [M:0;350fccc412b5:45513 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T03:50:11,448 DEBUG [M:0;350fccc412b5:45513 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T03:50:11,448 INFO [M:0;350fccc412b5:45513 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:11,448 DEBUG [M:0;350fccc412b5:45513 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:11,448 DEBUG [M:0;350fccc412b5:45513 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T03:50:11,448 DEBUG [M:0;350fccc412b5:45513 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T03:50:11,448 INFO [M:0;350fccc412b5:45513 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-11-08T03:50:11,467 DEBUG [M:0;350fccc412b5:45513 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/70815d99d1ad4cf7ae76fb25eb644a4c is 82, key is hbase:meta,,1/info:regioninfo/1731037809101/Put/seqid=0 2024-11-08T03:50:11,469 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:11,469 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:11,471 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:49136 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:43295:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49136 dst: /127.0.0.1:43295 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:11,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-08T03:50:11,476 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T03:50:11,476 INFO [M:0;350fccc412b5:45513 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/70815d99d1ad4cf7ae76fb25eb644a4c 2024-11-08T03:50:11,503 DEBUG [M:0;350fccc412b5:45513 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f7f13c3962d7438c815f2c49303f5bcf is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731037809907/Put/seqid=0 2024-11-08T03:50:11,505 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:11,505 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:11,508 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:58260 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:42839:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58260 dst: /127.0.0.1:42839 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:11,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_-9223372036854775552_1037 (size=6439) 2024-11-08T03:50:11,513 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T03:50:11,513 INFO [M:0;350fccc412b5:45513 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f7f13c3962d7438c815f2c49303f5bcf 2024-11-08T03:50:11,537 DEBUG [M:0;350fccc412b5:45513 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/530bd18b414f47e5b4714b1de072970a is 69, key is 350fccc412b5,36433,1731037806965/rs:state/1731037808199/Put/seqid=0 2024-11-08T03:50:11,539 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:11,539 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-08T03:50:11,541 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_335514009_22 at /127.0.0.1:41640 [Receiving block BP-1412343470-172.17.0.3-1731037801377:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:33069:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41640 dst: /127.0.0.1:33069 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-08T03:50:11,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-08T03:50:11,546 WARN [M:0;350fccc412b5:45513 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-08T03:50:11,546 INFO [M:0;350fccc412b5:45513 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/530bd18b414f47e5b4714b1de072970a 2024-11-08T03:50:11,554 DEBUG [M:0;350fccc412b5:45513 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/70815d99d1ad4cf7ae76fb25eb644a4c as hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/70815d99d1ad4cf7ae76fb25eb644a4c 2024-11-08T03:50:11,562 INFO [M:0;350fccc412b5:45513 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/70815d99d1ad4cf7ae76fb25eb644a4c, entries=8, sequenceid=72, filesize=5.5 K 2024-11-08T03:50:11,563 DEBUG [M:0;350fccc412b5:45513 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f7f13c3962d7438c815f2c49303f5bcf as hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f7f13c3962d7438c815f2c49303f5bcf 2024-11-08T03:50:11,571 INFO [M:0;350fccc412b5:45513 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f7f13c3962d7438c815f2c49303f5bcf, entries=8, sequenceid=72, filesize=6.3 K 2024-11-08T03:50:11,572 DEBUG [M:0;350fccc412b5:45513 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/530bd18b414f47e5b4714b1de072970a as hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/530bd18b414f47e5b4714b1de072970a 2024-11-08T03:50:11,580 INFO [M:0;350fccc412b5:45513 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/530bd18b414f47e5b4714b1de072970a, entries=3, sequenceid=72, filesize=5.2 K 2024-11-08T03:50:11,582 INFO [M:0;350fccc412b5:45513 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=72, compaction requested=false 2024-11-08T03:50:11,583 INFO [M:0;350fccc412b5:45513 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-08T03:50:11,583 DEBUG [M:0;350fccc412b5:45513 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731037811448Disabling compacts and flushes for region at 1731037811448Disabling writes for close at 1731037811448Obtaining lock to block concurrent updates at 1731037811448Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731037811448Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1731037811449 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731037811450 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731037811450Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731037811466 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731037811466Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731037811485 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731037811502 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731037811502Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731037811521 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731037811536 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731037811536Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@223ca218: reopening flushed file at 1731037811553 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c7e3642: reopening flushed file at 1731037811562 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b10aa2f: reopening flushed file at 1731037811571 (+9 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 133ms, sequenceid=72, compaction requested=false at 1731037811582 (+11 ms)Writing region close event to WAL at 1731037811583 (+1 ms)Closed at 1731037811583 2024-11-08T03:50:11,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33069 is added to blk_1073741825_1011 (size=32674) 2024-11-08T03:50:11,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42839 is added to blk_1073741825_1011 (size=32674) 2024-11-08T03:50:11,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43295 is added to blk_1073741825_1011 (size=32674) 2024-11-08T03:50:11,588 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T03:50:11,588 INFO [M:0;350fccc412b5:45513 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-08T03:50:11,588 INFO [M:0;350fccc412b5:45513 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:45513 2024-11-08T03:50:11,588 INFO [M:0;350fccc412b5:45513 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T03:50:11,699 INFO [M:0;350fccc412b5:45513 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T03:50:11,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:11,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45513-0x1011890b6a10000, quorum=127.0.0.1:56972, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:11,771 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@17f8e572{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:11,777 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11aaae40{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T03:50:11,777 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T03:50:11,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@14c1b227{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T03:50:11,778 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28d0ee11{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.log.dir/,STOPPED} 2024-11-08T03:50:11,782 WARN [BP-1412343470-172.17.0.3-1731037801377 heartbeating to localhost/127.0.0.1:41665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T03:50:11,782 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T03:50:11,782 WARN [BP-1412343470-172.17.0.3-1731037801377 heartbeating to localhost/127.0.0.1:41665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1412343470-172.17.0.3-1731037801377 (Datanode Uuid 40bf6719-82e2-491f-b32c-24904674396c) service to localhost/127.0.0.1:41665 2024-11-08T03:50:11,782 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T03:50:11,783 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data5/current/BP-1412343470-172.17.0.3-1731037801377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T03:50:11,783 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data6/current/BP-1412343470-172.17.0.3-1731037801377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T03:50:11,784 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T03:50:11,785 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@133f1bad{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:11,786 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@39860596{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T03:50:11,786 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T03:50:11,786 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@125705fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T03:50:11,786 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@50510811{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.log.dir/,STOPPED} 2024-11-08T03:50:11,787 WARN [BP-1412343470-172.17.0.3-1731037801377 heartbeating to localhost/127.0.0.1:41665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T03:50:11,787 WARN [BP-1412343470-172.17.0.3-1731037801377 heartbeating to localhost/127.0.0.1:41665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1412343470-172.17.0.3-1731037801377 (Datanode Uuid 77707996-e807-4342-a960-0e4b5a7c8c2b) service to localhost/127.0.0.1:41665 2024-11-08T03:50:11,788 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data3/current/BP-1412343470-172.17.0.3-1731037801377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 
2024-11-08T03:50:11,788 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data4/current/BP-1412343470-172.17.0.3-1731037801377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T03:50:11,788 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-08T03:50:11,788 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T03:50:11,788 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T03:50:11,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3990ff75{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:11,793 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@48731e1b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T03:50:11,793 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T03:50:11,793 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4f02cc61{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T03:50:11,794 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@179ed6d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.log.dir/,STOPPED} 2024-11-08T03:50:11,795 WARN [BP-1412343470-172.17.0.3-1731037801377 heartbeating to localhost/127.0.0.1:41665 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T03:50:11,795 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T03:50:11,795 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T03:50:11,795 WARN [BP-1412343470-172.17.0.3-1731037801377 heartbeating to localhost/127.0.0.1:41665 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1412343470-172.17.0.3-1731037801377 (Datanode Uuid 10059244-cf2b-4b9b-8b92-f22ec880b825) service to localhost/127.0.0.1:41665 2024-11-08T03:50:11,796 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data1/current/BP-1412343470-172.17.0.3-1731037801377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T03:50:11,796 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/cluster_5ab81a15-12c2-dc99-6fad-232c01c6f10c/data/data2/current/BP-1412343470-172.17.0.3-1731037801377 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T03:50:11,796 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T03:50:11,804 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cb83937{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T03:50:11,805 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69b5b273{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T03:50:11,805 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T03:50:11,805 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3cbd6fd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T03:50:11,805 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b03c34d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.log.dir/,STOPPED} 2024-11-08T03:50:11,813 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-08T03:50:11,840 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-08T03:50:11,846 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=94 (was 161), OpenFileDescriptor=447 (was 393) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=65 (was 62) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=12786 (was 13091) 2024-11-08T03:50:11,852 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=94, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=65, ProcessCount=11, AvailableMemoryMB=12786 2024-11-08T03:50:11,852 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-08T03:50:11,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.log.dir so I do NOT create it in target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37 2024-11-08T03:50:11,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/95af2f10-c580-fc6f-1b10-a4fe6b6bc814/hadoop.tmp.dir so I do NOT create it in target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37 2024-11-08T03:50:11,853 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6, deleteOnExit=true 2024-11-08T03:50:11,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-08T03:50:11,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/test.cache.data in system properties and HBase conf 2024-11-08T03:50:11,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/hadoop.tmp.dir in system properties and HBase conf 2024-11-08T03:50:11,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/hadoop.log.dir in system properties and HBase conf 2024-11-08T03:50:11,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-08T03:50:11,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-08T03:50:11,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-08T03:50:11,854 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-08T03:50:11,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-08T03:50:11,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-08T03:50:11,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-08T03:50:11,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T03:50:11,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-08T03:50:11,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-08T03:50:11,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-08T03:50:11,854 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T03:50:11,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-08T03:50:11,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/nfs.dump.dir in system properties and HBase conf 2024-11-08T03:50:11,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/java.io.tmpdir in system properties and HBase conf 2024-11-08T03:50:11,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-08T03:50:11,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-08T03:50:11,855 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-08T03:50:12,189 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T03:50:12,195 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T03:50:12,196 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T03:50:12,196 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T03:50:12,197 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T03:50:12,197 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T03:50:12,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b990189{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/hadoop.log.dir/,AVAILABLE} 2024-11-08T03:50:12,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c37558{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T03:50:12,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@574dcc44{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/java.io.tmpdir/jetty-localhost-35605-hadoop-hdfs-3_4_1-tests_jar-_-any-15119603135123721457/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T03:50:12,293 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4a66f025{HTTP/1.1, (http/1.1)}{localhost:35605} 2024-11-08T03:50:12,293 INFO [Time-limited test {}] server.Server(415): Started @12747ms 2024-11-08T03:50:12,598 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T03:50:12,602 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T03:50:12,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T03:50:12,603 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T03:50:12,603 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T03:50:12,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47a1471c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/hadoop.log.dir/,AVAILABLE} 2024-11-08T03:50:12,604 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f39fb56{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T03:50:12,699 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@8cdf55{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/java.io.tmpdir/jetty-localhost-43105-hadoop-hdfs-3_4_1-tests_jar-_-any-8186512335526635355/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:12,699 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5d6b32e0{HTTP/1.1, (http/1.1)}{localhost:43105} 2024-11-08T03:50:12,700 INFO [Time-limited test {}] server.Server(415): Started @13154ms 2024-11-08T03:50:12,701 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T03:50:12,730 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T03:50:12,734 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T03:50:12,734 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T03:50:12,734 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T03:50:12,735 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-08T03:50:12,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45f9ed01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/hadoop.log.dir/,AVAILABLE} 2024-11-08T03:50:12,735 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@cd43b0e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T03:50:12,832 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f04efdd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/java.io.tmpdir/jetty-localhost-42899-hadoop-hdfs-3_4_1-tests_jar-_-any-11763231453412597356/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:12,832 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1777b071{HTTP/1.1, (http/1.1)}{localhost:42899} 2024-11-08T03:50:12,832 INFO [Time-limited test {}] server.Server(415): Started @13287ms 2024-11-08T03:50:12,834 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T03:50:12,865 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-08T03:50:12,868 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-08T03:50:12,871 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-08T03:50:12,871 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-08T03:50:12,871 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-08T03:50:12,872 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68281a7c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/hadoop.log.dir/,AVAILABLE} 2024-11-08T03:50:12,872 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1de5d3bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-08T03:50:12,967 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2cad0990{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/java.io.tmpdir/jetty-localhost-43329-hadoop-hdfs-3_4_1-tests_jar-_-any-132280268697708653/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:12,967 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3aa37294{HTTP/1.1, (http/1.1)}{localhost:43329} 2024-11-08T03:50:12,967 INFO [Time-limited test {}] server.Server(415): Started @13422ms 2024-11-08T03:50:12,969 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-08T03:50:14,465 WARN [Thread-565 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data1/current/BP-2060482000-172.17.0.3-1731037811879/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:14,465 WARN [Thread-566 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data2/current/BP-2060482000-172.17.0.3-1731037811879/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:14,489 WARN [Thread-505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T03:50:14,492 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x129e390f9ed203a2 with lease ID 0x18461766758a3b06: Processing first storage report for DS-3de8e3b0-4291-40be-ae86-40bd0bda1b2c from datanode DatanodeRegistration(127.0.0.1:33003, datanodeUuid=e849586a-c2bf-4c42-a156-b8e015412504, infoPort=38105, infoSecurePort=0, ipcPort=45889, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879) 2024-11-08T03:50:14,492 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x129e390f9ed203a2 with lease ID 0x18461766758a3b06: from storage DS-3de8e3b0-4291-40be-ae86-40bd0bda1b2c node DatanodeRegistration(127.0.0.1:33003, datanodeUuid=e849586a-c2bf-4c42-a156-b8e015412504, infoPort=38105, infoSecurePort=0, ipcPort=45889, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T03:50:14,492 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x129e390f9ed203a2 with lease ID 0x18461766758a3b06: Processing first storage report for DS-248c929d-0b2a-450f-b07b-50acd50c1452 from datanode DatanodeRegistration(127.0.0.1:33003, datanodeUuid=e849586a-c2bf-4c42-a156-b8e015412504, infoPort=38105, infoSecurePort=0, ipcPort=45889, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879) 2024-11-08T03:50:14,492 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x129e390f9ed203a2 with lease ID 0x18461766758a3b06: from storage DS-248c929d-0b2a-450f-b07b-50acd50c1452 node DatanodeRegistration(127.0.0.1:33003, datanodeUuid=e849586a-c2bf-4c42-a156-b8e015412504, infoPort=38105, infoSecurePort=0, ipcPort=45889, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T03:50:14,573 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-08T03:50:14,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T03:50:14,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-08T03:50:14,686 WARN [Thread-577 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data3/current/BP-2060482000-172.17.0.3-1731037811879/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:14,686 WARN [Thread-578 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data4/current/BP-2060482000-172.17.0.3-1731037811879/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:14,710 WARN [Thread-528 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T03:50:14,713 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd9fbc89cc9da879 with lease ID 0x18461766758a3b07: Processing first storage report for DS-b50663ca-cc22-41eb-9237-1e22cd94290b from datanode DatanodeRegistration(127.0.0.1:42013, datanodeUuid=342b4667-e136-4271-9ffa-e18fd6e7295d, infoPort=35259, infoSecurePort=0, ipcPort=43865, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879) 2024-11-08T03:50:14,713 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd9fbc89cc9da879 with lease ID 0x18461766758a3b07: from storage DS-b50663ca-cc22-41eb-9237-1e22cd94290b node DatanodeRegistration(127.0.0.1:42013, datanodeUuid=342b4667-e136-4271-9ffa-e18fd6e7295d, infoPort=35259, infoSecurePort=0, ipcPort=43865, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T03:50:14,714 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcd9fbc89cc9da879 with lease ID 0x18461766758a3b07: Processing first storage report for DS-70dcc98e-b996-4ada-a55b-4cfa60ae35aa from datanode DatanodeRegistration(127.0.0.1:42013, datanodeUuid=342b4667-e136-4271-9ffa-e18fd6e7295d, infoPort=35259, infoSecurePort=0, ipcPort=43865, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879) 2024-11-08T03:50:14,714 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcd9fbc89cc9da879 with lease ID 0x18461766758a3b07: from storage DS-70dcc98e-b996-4ada-a55b-4cfa60ae35aa node DatanodeRegistration(127.0.0.1:42013, datanodeUuid=342b4667-e136-4271-9ffa-e18fd6e7295d, infoPort=35259, infoSecurePort=0, ipcPort=43865, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T03:50:14,727 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data5/current/BP-2060482000-172.17.0.3-1731037811879/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:14,728 WARN [Thread-589 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data6/current/BP-2060482000-172.17.0.3-1731037811879/current, will proceed with Du for space computation calculation, 2024-11-08T03:50:14,746 WARN [Thread-550 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-08T03:50:14,748 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f4e0e8a8adf71e2 with lease ID 0x18461766758a3b08: Processing first storage report for DS-c48863d7-86e0-4269-8ebc-a871e6341789 from datanode DatanodeRegistration(127.0.0.1:43531, datanodeUuid=7888a538-ce46-400c-ac32-cb7cb76b633c, infoPort=44245, infoSecurePort=0, ipcPort=45763, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879) 2024-11-08T03:50:14,749 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f4e0e8a8adf71e2 with lease ID 0x18461766758a3b08: from storage DS-c48863d7-86e0-4269-8ebc-a871e6341789 node DatanodeRegistration(127.0.0.1:43531, datanodeUuid=7888a538-ce46-400c-ac32-cb7cb76b633c, infoPort=44245, infoSecurePort=0, ipcPort=45763, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T03:50:14,749 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f4e0e8a8adf71e2 with lease ID 0x18461766758a3b08: Processing first storage report for DS-63d047d2-7401-45ac-934e-5950434f712a from datanode DatanodeRegistration(127.0.0.1:43531, datanodeUuid=7888a538-ce46-400c-ac32-cb7cb76b633c, infoPort=44245, infoSecurePort=0, ipcPort=45763, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879) 2024-11-08T03:50:14,749 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f4e0e8a8adf71e2 with lease ID 0x18461766758a3b08: from storage DS-63d047d2-7401-45ac-934e-5950434f712a node DatanodeRegistration(127.0.0.1:43531, datanodeUuid=7888a538-ce46-400c-ac32-cb7cb76b633c, infoPort=44245, infoSecurePort=0, ipcPort=45763, storageInfo=lv=-57;cid=testClusterID;nsid=1180355534;c=1731037811879), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-08T03:50:14,817 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37 2024-11-08T03:50:14,821 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/zookeeper_0, clientPort=55842, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-08T03:50:14,822 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55842 2024-11-08T03:50:14,823 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
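[editor's note] Everything from the StartMiniClusterOption line earlier (1 master, 3 region servers, 3 data nodes, 1 ZooKeeper server) down to the MiniZooKeeperCluster "stat" check above is driven by a single mini-cluster start call in the test. A rough sketch of that setup, with builder method names inferred from the logged option string rather than taken from the test source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;
    import org.junit.BeforeClass;

    public class MiniClusterStartupSketch {
      private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

      @BeforeClass
      public static void setUpBeforeClass() throws Exception {
        // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=3,
        // numDataNodes=3, numZkServers=1} as printed by HBaseTestingUtil(805).
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(3)
            .numDataNodes(3)
            .numZkServers(1)
            .build();
        UTIL.startMiniCluster(option);
      }
    }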
2024-11-08T03:50:14,825 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:14,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741825_1001 (size=7) 2024-11-08T03:50:14,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741825_1001 (size=7) 2024-11-08T03:50:14,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741825_1001 (size=7) 2024-11-08T03:50:14,847 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1 with version=8 2024-11-08T03:50:14,847 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:41665/user/jenkins/test-data/f859c73f-daae-fd8b-827d-dbf18bd26407/hbase-staging 2024-11-08T03:50:14,850 INFO [Time-limited test {}] client.ConnectionUtils(128): master/350fccc412b5:0 server-side Connection retries=45 2024-11-08T03:50:14,850 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:14,850 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:14,850 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T03:50:14,850 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:14,850 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T03:50:14,850 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-08T03:50:14,851 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T03:50:14,851 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:42065 2024-11-08T03:50:14,853 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42065 connecting to ZooKeeper ensemble=127.0.0.1:55842 2024-11-08T03:50:14,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:420650x0, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T03:50:14,918 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42065-0x1011890dbcf0000 connected 2024-11-08T03:50:14,999 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to 
namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:15,004 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:15,007 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:15,007 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1, hbase.cluster.distributed=false 2024-11-08T03:50:15,010 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T03:50:15,010 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42065 2024-11-08T03:50:15,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42065 2024-11-08T03:50:15,011 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42065 2024-11-08T03:50:15,012 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42065 2024-11-08T03:50:15,012 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42065 2024-11-08T03:50:15,030 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/350fccc412b5:0 server-side Connection retries=45 2024-11-08T03:50:15,030 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:15,030 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:15,031 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T03:50:15,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:15,031 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T03:50:15,031 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T03:50:15,031 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T03:50:15,031 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:37435 
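[editor's note] The RpcExecutor lines above show each server coming up with small call queues (handlerCount=3, maxQueueLength=30) and a read/write split on the priority queue. Those sizes come from configuration rather than code; a hedged sketch of two standard keys involved, with the low values chosen purely to mirror what this log prints:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RpcQueueConfigSketch {
      public static Configuration smallRpcQueues() {
        Configuration conf = HBaseConfiguration.create();
        // Few RPC handlers per server, matching the handlerCount=3 executors above.
        conf.setInt("hbase.regionserver.handler.count", 3);
        // Cap each call queue, matching the maxQueueLength=30 seen above.
        conf.setInt("hbase.ipc.server.max.callqueue.length", 30);
        return conf;
      }
    }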
2024-11-08T03:50:15,033 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37435 connecting to ZooKeeper ensemble=127.0.0.1:55842 2024-11-08T03:50:15,034 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:15,035 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:15,048 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:374350x0, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T03:50:15,049 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37435-0x1011890dbcf0001 connected 2024-11-08T03:50:15,049 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:15,050 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T03:50:15,050 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T03:50:15,051 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T03:50:15,053 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T03:50:15,054 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37435 2024-11-08T03:50:15,054 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37435 2024-11-08T03:50:15,055 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37435 2024-11-08T03:50:15,055 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37435 2024-11-08T03:50:15,056 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37435 2024-11-08T03:50:15,073 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/350fccc412b5:0 server-side Connection retries=45 2024-11-08T03:50:15,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:15,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:15,073 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T03:50:15,073 INFO 
[Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:15,073 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T03:50:15,074 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T03:50:15,074 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T03:50:15,074 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:44007 2024-11-08T03:50:15,076 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44007 connecting to ZooKeeper ensemble=127.0.0.1:55842 2024-11-08T03:50:15,076 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:15,078 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:15,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440070x0, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T03:50:15,093 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44007-0x1011890dbcf0002 connected 2024-11-08T03:50:15,094 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:15,094 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T03:50:15,095 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T03:50:15,096 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-08T03:50:15,097 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T03:50:15,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44007 2024-11-08T03:50:15,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44007 2024-11-08T03:50:15,098 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44007 2024-11-08T03:50:15,099 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44007 2024-11-08T03:50:15,099 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44007 2024-11-08T03:50:15,116 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/350fccc412b5:0 server-side Connection retries=45 2024-11-08T03:50:15,116 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:15,116 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:15,116 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-08T03:50:15,116 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-08T03:50:15,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-08T03:50:15,117 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-08T03:50:15,117 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-08T03:50:15,117 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.3:46863 2024-11-08T03:50:15,119 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46863 connecting to ZooKeeper ensemble=127.0.0.1:55842 2024-11-08T03:50:15,119 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:15,121 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:15,135 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:468630x0, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-08T03:50:15,136 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:468630x0, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:15,136 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46863-0x1011890dbcf0003 connected 2024-11-08T03:50:15,136 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-08T03:50:15,137 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-08T03:50:15,137 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, 
/hbase/master 2024-11-08T03:50:15,139 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-08T03:50:15,139 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46863 2024-11-08T03:50:15,140 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46863 2024-11-08T03:50:15,140 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46863 2024-11-08T03:50:15,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46863 2024-11-08T03:50:15,141 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46863 2024-11-08T03:50:15,155 DEBUG [M:0;350fccc412b5:42065 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;350fccc412b5:42065 2024-11-08T03:50:15,156 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/350fccc412b5,42065,1731037814849 2024-11-08T03:50:15,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:15,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:15,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:15,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:15,167 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/350fccc412b5,42065,1731037814849 2024-11-08T03:50:15,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T03:50:15,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T03:50:15,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,177 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,177 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-08T03:50:15,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,178 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-08T03:50:15,179 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/350fccc412b5,42065,1731037814849 from backup master directory 2024-11-08T03:50:15,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:15,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/350fccc412b5,42065,1731037814849 2024-11-08T03:50:15,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:15,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:15,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-08T03:50:15,188 WARN [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T03:50:15,188 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=350fccc412b5,42065,1731037814849 2024-11-08T03:50:15,194 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/hbase.id] with ID: a24fbbca-ae59-4d2b-a9b0-5ba7f8c043d5 2024-11-08T03:50:15,194 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/.tmp/hbase.id 2024-11-08T03:50:15,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741826_1002 (size=42) 2024-11-08T03:50:15,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741826_1002 (size=42) 2024-11-08T03:50:15,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741826_1002 (size=42) 2024-11-08T03:50:15,204 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/.tmp/hbase.id]:[hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/hbase.id] 2024-11-08T03:50:15,221 INFO [master/350fccc412b5:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-08T03:50:15,221 INFO [master/350fccc412b5:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-08T03:50:15,223 INFO [master/350fccc412b5:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
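[editor's note] At this point the master above has won the active-master election, written the cluster ID file and is about to bootstrap its local 'master:store' region, so a test can start issuing client calls against the cluster. A hypothetical read/write check in the spirit of testReadWrite, with the table and column names invented here for illustration:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReadWriteSketch {
      // util is assumed to be the HBaseTestingUtil that started this cluster.
      static void writeThenRead(HBaseTestingUtil util) throws Exception {
        TableName name = TableName.valueOf("exampleTable");
        byte[] family = Bytes.toBytes("cf");
        try (Table table = util.createTable(name, family)) {
          table.put(new Put(Bytes.toBytes("row1"))
              .addColumn(family, Bytes.toBytes("q"), Bytes.toBytes("value")));
          Result result = table.get(new Get(Bytes.toBytes("row1")));
          assert Bytes.equals(result.getValue(family, Bytes.toBytes("q")),
              Bytes.toBytes("value"));
        }
      }
    }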
2024-11-08T03:50:15,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741827_1003 (size=196) 2024-11-08T03:50:15,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741827_1003 (size=196) 2024-11-08T03:50:15,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741827_1003 (size=196) 2024-11-08T03:50:15,240 INFO [master/350fccc412b5:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T03:50:15,241 INFO [master/350fccc412b5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-08T03:50:15,241 INFO [master/350fccc412b5:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T03:50:15,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is 
added to blk_1073741828_1004 (size=1189) 2024-11-08T03:50:15,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741828_1004 (size=1189) 2024-11-08T03:50:15,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741828_1004 (size=1189) 2024-11-08T03:50:15,254 INFO [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store 2024-11-08T03:50:15,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741829_1005 (size=34) 2024-11-08T03:50:15,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741829_1005 (size=34) 2024-11-08T03:50:15,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741829_1005 (size=34) 2024-11-08T03:50:15,263 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:15,263 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T03:50:15,263 INFO [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:15,263 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
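The "Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682 ...}" entry above spells out the master:store table descriptor: four column families (info, proc, rs, state) with per-family versions, bloom filter type, data block encoding, block size and in-memory flag. As a rough illustration of how a descriptor with that shape is assembled through the public HBase client builders, here is a hedged sketch; the family names and values are copied from the log, only two of the four families are shown, and the surrounding class is illustrative rather than HBase's own bootstrap code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a descriptor with the same shape as the 'master:store' families logged above.
public class StoreDescriptorSketch {
  static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8 * 1024)                   // BLOCKSIZE => '8192 B (8KB)'
        .build();
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)                  // BLOCKSIZE => '65536 B (64KB)'
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)                    // 'rs' and 'state' would be built the same way
        .build();
  }
}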
2024-11-08T03:50:15,263 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T03:50:15,263 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:15,263 INFO [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:15,264 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731037815263Disabling compacts and flushes for region at 1731037815263Disabling writes for close at 1731037815263Writing region close event to WAL at 1731037815263Closed at 1731037815263 2024-11-08T03:50:15,264 WARN [master/350fccc412b5:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/.initializing 2024-11-08T03:50:15,265 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/WALs/350fccc412b5,42065,1731037814849 2024-11-08T03:50:15,268 INFO [master/350fccc412b5:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=350fccc412b5%2C42065%2C1731037814849, suffix=, logDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/WALs/350fccc412b5,42065,1731037814849, archiveDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/oldWALs, maxLogs=10 2024-11-08T03:50:15,269 INFO [master/350fccc412b5:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 350fccc412b5%2C42065%2C1731037814849.1731037815268 2024-11-08T03:50:15,278 INFO [master/350fccc412b5:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/WALs/350fccc412b5,42065,1731037814849/350fccc412b5%2C42065%2C1731037814849.1731037815268 2024-11-08T03:50:15,280 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44245:44245),(127.0.0.1/127.0.0.1:38105:38105),(127.0.0.1/127.0.0.1:35259:35259)] 2024-11-08T03:50:15,281 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-08T03:50:15,281 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:15,281 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,281 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,283 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-08T03:50:15,285 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:15,285 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:15,286 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,287 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-08T03:50:15,287 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:15,288 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T03:50:15,288 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,290 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-08T03:50:15,291 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:15,291 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T03:50:15,291 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,293 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-08T03:50:15,293 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:15,294 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T03:50:15,294 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,295 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,296 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,297 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,297 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,298 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T03:50:15,299 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-08T03:50:15,301 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T03:50:15,302 INFO [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62129214, jitterRate=-0.07420256733894348}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T03:50:15,303 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731037815281Initializing all the Stores at 1731037815283 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037815283Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037815283Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037815283Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037815283Cleaning up temporary data from old regions at 1731037815297 (+14 ms)Region opened successfully at 1731037815303 (+6 ms) 2024-11-08T03:50:15,303 INFO [master/350fccc412b5:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-08T03:50:15,308 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63a2a583, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=350fccc412b5/172.17.0.3:0 2024-11-08T03:50:15,309 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-08T03:50:15,309 INFO [master/350fccc412b5:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-08T03:50:15,309 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-08T03:50:15,310 INFO [master/350fccc412b5:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-08T03:50:15,310 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-08T03:50:15,311 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-08T03:50:15,311 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-08T03:50:15,314 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
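The FlushLargeStoresPolicy entry above notes that no hbase.hregion.percolumnfamilyflush.size.lower.bound is set, so the per-family lower bound falls back to the region memstore flush size divided by the number of families: with flushSize=134217728 (128 MB) and four families (info, proc, rs, state) that is 33554432 bytes, the 32 MB figure in the log and the flushSizeLowerBound=33554432 reported when the region opened. A trivial check of that arithmetic:

// Quick check of the fallback logged above:
// memstore flush size / number of column families = per-family flush lower bound.
public class FlushLowerBoundCheck {
  public static void main(String[] args) {
    long memstoreFlushSize = 134_217_728L;   // flushSize from the MasterRegion entries (128 MB)
    int families = 4;                        // info, proc, rs, state
    long lowerBound = memstoreFlushSize / families;
    System.out.println(lowerBound);          // 33554432, i.e. the 32 MB flushSizeLowerBound above
  }
}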
2024-11-08T03:50:15,315 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-08T03:50:15,324 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-08T03:50:15,325 INFO [master/350fccc412b5:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-08T03:50:15,326 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-08T03:50:15,335 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-08T03:50:15,335 INFO [master/350fccc412b5:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-08T03:50:15,337 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-08T03:50:15,345 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-08T03:50:15,347 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-08T03:50:15,356 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-08T03:50:15,358 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-08T03:50:15,366 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-08T03:50:15,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:15,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:15,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:15,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-08T03:50:15,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:15,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,378 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=350fccc412b5,42065,1731037814849, sessionid=0x1011890dbcf0000, setting cluster-up flag (Was=false) 2024-11-08T03:50:15,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,398 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,430 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-08T03:50:15,433 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=350fccc412b5,42065,1731037814849 2024-11-08T03:50:15,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,451 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,482 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-08T03:50:15,484 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=350fccc412b5,42065,1731037814849 2024-11-08T03:50:15,485 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-08T03:50:15,488 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-08T03:50:15,488 INFO [master/350fccc412b5:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-08T03:50:15,488 INFO [master/350fccc412b5:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
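The ZKProcedureUtil entries above clear the acquired/reached/abort barrier znodes under /hbase/flush-table-proc and /hbase/online-snapshot before the coordinator starts. A minimal sketch of removing one such small znode subtree with the plain ZooKeeper API follows; it assumes a single level of children, the helper name is made up, and HBase's own ZKProcedureUtil may handle the barrier nodes differently.

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;

// Sketch: delete a znode and its direct children, in the spirit of the "Clearing all znodes" entries above.
public class ZNodeClearSketch {
  // Assumes one level of children; deeper trees would need recursion.
  static void clear(ZooKeeper zk, String path) throws KeeperException, InterruptedException {
    if (zk.exists(path, false) == null) {
      return;                                    // nothing to clear
    }
    for (String child : zk.getChildren(path, false)) {
      zk.delete(path + "/" + child, -1);         // -1 = delete regardless of version
    }
    zk.delete(path, -1);
  }
}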
2024-11-08T03:50:15,488 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 350fccc412b5,42065,1731037814849 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-08T03:50:15,490 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/350fccc412b5:0, corePoolSize=5, maxPoolSize=5 2024-11-08T03:50:15,490 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/350fccc412b5:0, corePoolSize=5, maxPoolSize=5 2024-11-08T03:50:15,490 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/350fccc412b5:0, corePoolSize=5, maxPoolSize=5 2024-11-08T03:50:15,490 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/350fccc412b5:0, corePoolSize=5, maxPoolSize=5 2024-11-08T03:50:15,490 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/350fccc412b5:0, corePoolSize=10, maxPoolSize=10 2024-11-08T03:50:15,490 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,490 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/350fccc412b5:0, corePoolSize=2, maxPoolSize=2 2024-11-08T03:50:15,490 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,491 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731037845491 2024-11-08T03:50:15,491 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-08T03:50:15,491 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-08T03:50:15,492 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-08T03:50:15,492 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-08T03:50:15,492 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-08T03:50:15,492 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-08T03:50:15,492 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,493 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-08T03:50:15,493 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-08T03:50:15,493 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-08T03:50:15,493 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T03:50:15,493 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-08T03:50:15,493 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-08T03:50:15,493 INFO [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-08T03:50:15,493 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.large.0-1731037815493,5,FailOnTimeoutGroup] 2024-11-08T03:50:15,494 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.small.0-1731037815493,5,FailOnTimeoutGroup] 2024-11-08T03:50:15,494 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,494 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-08T03:50:15,494 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,494 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
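The ChoreService entries above register periodic chores with fixed periods: LogsCleaner and HFileCleaner every 600000 ms, SnapshotCleaner every 1800000 ms, ReplicationBarrierCleaner every 43200000 ms. The sketch below shows the same fixed-period idea using only java.util.concurrent rather than HBase's ChoreService; the task body and class name are placeholders, and only the LogsCleaner period is reused from the log.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Sketch: a fixed-period cleanup task, the same idea as the ScheduledChore entries above.
public class CleanerChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
    Runnable logsCleaner = () -> System.out.println("scanning oldWALs for expired files...");
    // LogsCleaner above runs with period=600000 ms, i.e. every 10 minutes.
    pool.scheduleAtFixedRate(logsCleaner, 0, 600_000, TimeUnit.MILLISECONDS);
  }
}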
2024-11-08T03:50:15,495 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:15,495 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-08T03:50:15,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741831_1007 (size=1321) 2024-11-08T03:50:15,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741831_1007 (size=1321) 2024-11-08T03:50:15,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741831_1007 (size=1321) 2024-11-08T03:50:15,506 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-08T03:50:15,506 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1 2024-11-08T03:50:15,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741832_1008 (size=32) 2024-11-08T03:50:15,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741832_1008 (size=32) 2024-11-08T03:50:15,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741832_1008 (size=32) 2024-11-08T03:50:15,516 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:15,517 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T03:50:15,519 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T03:50:15,519 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:15,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:15,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T03:50:15,521 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T03:50:15,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:15,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:15,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T03:50:15,524 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T03:50:15,524 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:15,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:15,525 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T03:50:15,526 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T03:50:15,527 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:15,527 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:15,527 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T03:50:15,528 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740 2024-11-08T03:50:15,529 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740 2024-11-08T03:50:15,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T03:50:15,530 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T03:50:15,531 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-08T03:50:15,532 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T03:50:15,535 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T03:50:15,535 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63747345, jitterRate=-0.05009053647518158}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T03:50:15,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731037815516Initializing all the Stores at 1731037815517 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037815517Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037815517Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037815517Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037815517Cleaning up temporary data from old regions at 1731037815530 (+13 ms)Region opened successfully at 1731037815536 (+6 ms) 2024-11-08T03:50:15,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T03:50:15,536 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T03:50:15,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T03:50:15,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T03:50:15,536 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T03:50:15,537 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T03:50:15,537 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731037815536Disabling compacts and flushes for region at 1731037815536Disabling writes for close at 1731037815536Writing region close event to WAL at 1731037815537 (+1 ms)Closed at 1731037815537 2024-11-08T03:50:15,539 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T03:50:15,539 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-08T03:50:15,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-08T03:50:15,540 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T03:50:15,542 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-08T03:50:15,543 INFO [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer(746): ClusterId : a24fbbca-ae59-4d2b-a9b0-5ba7f8c043d5 2024-11-08T03:50:15,543 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T03:50:15,553 INFO [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(746): ClusterId : a24fbbca-ae59-4d2b-a9b0-5ba7f8c043d5 2024-11-08T03:50:15,553 INFO [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer(746): ClusterId : a24fbbca-ae59-4d2b-a9b0-5ba7f8c043d5 
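The two "Opened ... ConstantSizeRegionSplitPolicy{desiredMaxFileSize=..., jitterRate=...}" entries above (62129214 with jitterRate=-0.07420256733894348 for master:store, 63747345 with jitterRate=-0.05009053647518158 for hbase:meta) are consistent with a 64 MB base split size jittered by the logged rate: 67108864 * (1 + jitterRate) reproduces both values. The 67108864-byte base is an inference from those numbers, not something the log prints, so treat the check below as exactly that, a check:

// Check: desiredMaxFileSize ≈ maxFileSize * (1 + jitterRate) for the two regions opened above.
public class SplitSizeJitterCheck {
  public static void main(String[] args) {
    long maxFileSize = 67_108_864L;   // assumed 64 MB base; inferred, not printed in the log
    double[] jitterRates = { -0.07420256733894348, -0.05009053647518158 };
    for (double jitter : jitterRates) {
      long desired = maxFileSize + Math.round(maxFileSize * jitter);
      System.out.println(desired);    // 62129214 and 63747345, the two values logged above
    }
  }
}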
2024-11-08T03:50:15,554 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T03:50:15,554 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-08T03:50:15,566 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T03:50:15,567 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T03:50:15,577 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T03:50:15,577 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-08T03:50:15,577 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T03:50:15,577 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-08T03:50:15,588 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T03:50:15,588 DEBUG [RS:0;350fccc412b5:37435 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@525a8609, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=350fccc412b5/172.17.0.3:0 2024-11-08T03:50:15,588 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T03:50:15,589 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-08T03:50:15,589 DEBUG [RS:1;350fccc412b5:44007 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6c0b49a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=350fccc412b5/172.17.0.3:0 2024-11-08T03:50:15,589 DEBUG [RS:2;350fccc412b5:46863 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2556fd98, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=350fccc412b5/172.17.0.3:0 2024-11-08T03:50:15,600 DEBUG [RS:0;350fccc412b5:37435 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;350fccc412b5:37435 2024-11-08T03:50:15,600 INFO [RS:0;350fccc412b5:37435 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T03:50:15,600 INFO [RS:0;350fccc412b5:37435 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T03:50:15,600 DEBUG [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer(832): About to register with Master. 
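The AbstractRpcClient entries above show each server constructing its RPC client with a KeyValueCodec, connectTO=10000, readTO=20000 and tcpNoDelay=true. For orientation, here is a hedged sketch of a plain client-side connection against the same ZooKeeper quorum; the quorum address and port come from the log, while mapping hbase.rpc.timeout to the readTO field above is an assumption rather than something the log states.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch: a client connection against the mini cluster's ZooKeeper quorum from the log.
public class ClientConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 55842);  // quorum port from the log
    conf.setInt("hbase.rpc.timeout", 20000);                    // assumed analogue of readTO above
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      System.out.println("connected; client RPCs go through this Connection");
    }
  }
}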
2024-11-08T03:50:15,601 INFO [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer(2659): reportForDuty to master=350fccc412b5,42065,1731037814849 with port=37435, startcode=1731037815030 2024-11-08T03:50:15,601 DEBUG [RS:0;350fccc412b5:37435 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T03:50:15,604 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:47641, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T03:50:15,604 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42065 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 350fccc412b5,37435,1731037815030 2024-11-08T03:50:15,605 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42065 {}] master.ServerManager(517): Registering regionserver=350fccc412b5,37435,1731037815030 2024-11-08T03:50:15,605 DEBUG [RS:2;350fccc412b5:46863 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;350fccc412b5:46863 2024-11-08T03:50:15,605 DEBUG [RS:1;350fccc412b5:44007 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;350fccc412b5:44007 2024-11-08T03:50:15,605 INFO [RS:2;350fccc412b5:46863 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T03:50:15,606 INFO [RS:1;350fccc412b5:44007 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-08T03:50:15,606 INFO [RS:2;350fccc412b5:46863 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T03:50:15,606 INFO [RS:1;350fccc412b5:44007 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-08T03:50:15,606 DEBUG [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-08T03:50:15,606 DEBUG [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-08T03:50:15,607 INFO [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer(2659): reportForDuty to master=350fccc412b5,42065,1731037814849 with port=46863, startcode=1731037815116 2024-11-08T03:50:15,607 INFO [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(2659): reportForDuty to master=350fccc412b5,42065,1731037814849 with port=44007, startcode=1731037815073 2024-11-08T03:50:15,607 DEBUG [RS:2;350fccc412b5:46863 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T03:50:15,607 DEBUG [RS:1;350fccc412b5:44007 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-08T03:50:15,607 DEBUG [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1 2024-11-08T03:50:15,607 DEBUG [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38775 2024-11-08T03:50:15,607 DEBUG [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T03:50:15,608 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:56795, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T03:50:15,608 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51921, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-08T03:50:15,609 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42065 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 350fccc412b5,46863,1731037815116 2024-11-08T03:50:15,609 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42065 {}] master.ServerManager(517): Registering regionserver=350fccc412b5,46863,1731037815116 2024-11-08T03:50:15,611 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42065 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 350fccc412b5,44007,1731037815073 2024-11-08T03:50:15,611 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42065 {}] master.ServerManager(517): Registering regionserver=350fccc412b5,44007,1731037815073 2024-11-08T03:50:15,611 DEBUG [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1 2024-11-08T03:50:15,611 DEBUG [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38775 2024-11-08T03:50:15,611 DEBUG [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T03:50:15,613 DEBUG [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1 2024-11-08T03:50:15,613 DEBUG [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38775 2024-11-08T03:50:15,613 DEBUG [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-08T03:50:15,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T03:50:15,655 DEBUG [RS:0;350fccc412b5:37435 {}] zookeeper.ZKUtil(111): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/350fccc412b5,37435,1731037815030 2024-11-08T03:50:15,655 WARN [RS:0;350fccc412b5:37435 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T03:50:15,655 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [350fccc412b5,37435,1731037815030] 2024-11-08T03:50:15,655 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [350fccc412b5,44007,1731037815073] 2024-11-08T03:50:15,655 INFO [RS:0;350fccc412b5:37435 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T03:50:15,655 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [350fccc412b5,46863,1731037815116] 2024-11-08T03:50:15,655 DEBUG [RS:2;350fccc412b5:46863 {}] zookeeper.ZKUtil(111): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/350fccc412b5,46863,1731037815116 2024-11-08T03:50:15,655 DEBUG [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/WALs/350fccc412b5,37435,1731037815030 2024-11-08T03:50:15,655 DEBUG [RS:1;350fccc412b5:44007 {}] zookeeper.ZKUtil(111): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/350fccc412b5,44007,1731037815073 2024-11-08T03:50:15,655 WARN [RS:2;350fccc412b5:46863 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-08T03:50:15,655 WARN [RS:1;350fccc412b5:44007 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-08T03:50:15,655 INFO [RS:2;350fccc412b5:46863 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T03:50:15,656 INFO [RS:1;350fccc412b5:44007 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T03:50:15,656 DEBUG [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/WALs/350fccc412b5,46863,1731037815116 2024-11-08T03:50:15,656 DEBUG [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/WALs/350fccc412b5,44007,1731037815073 2024-11-08T03:50:15,660 INFO [RS:0;350fccc412b5:37435 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T03:50:15,660 INFO [RS:2;350fccc412b5:46863 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T03:50:15,660 INFO [RS:1;350fccc412b5:44007 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-08T03:50:15,663 INFO [RS:2;350fccc412b5:46863 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T03:50:15,664 INFO [RS:1;350fccc412b5:44007 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T03:50:15,664 INFO [RS:2;350fccc412b5:46863 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T03:50:15,664 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,665 INFO [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T03:50:15,665 INFO [RS:1;350fccc412b5:44007 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T03:50:15,665 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,665 INFO [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T03:50:15,666 INFO [RS:2;350fccc412b5:46863 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T03:50:15,666 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,666 INFO [RS:1;350fccc412b5:44007 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T03:50:15,666 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
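The MemStoreFlusher and PressureAwareCompactionThroughputController lines above are derived from a handful of site configuration properties. Below is a minimal sketch of the corresponding knobs, assuming the standard property names (hbase.regionserver.global.memstore.size and related keys) and the default 0.4/0.95 fractions that yield the logged 880 M limit with its 836 M lower mark on this test heap.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreAndCompactionThroughputConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Global memstore limit as a fraction of the region server heap.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Lower mark as a fraction of the limit above (0.95 of 880 M is the logged 836 M).
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
    // Pressure-aware compaction throughput bounds matching the logged 100/50 MB per second.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
  }
}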
2024-11-08T03:50:15,666 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,666 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,666 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,666 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,666 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,666 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,666 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,666 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,666 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/350fccc412b5:0, corePoolSize=2, maxPoolSize=2 2024-11-08T03:50:15,667 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/350fccc412b5:0, corePoolSize=2, maxPoolSize=2 2024-11-08T03:50:15,667 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/350fccc412b5:0, 
corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,667 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:15,667 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:15,667 DEBUG [RS:2;350fccc412b5:46863 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:15,667 DEBUG [RS:1;350fccc412b5:44007 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:15,668 INFO [RS:0;350fccc412b5:37435 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-08T03:50:15,668 INFO [RS:0;350fccc412b5:37435 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-08T03:50:15,668 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,672 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,672 INFO [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-08T03:50:15,672 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,672 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,672 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-08T03:50:15,672 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,672 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,672 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,672 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,672 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,44007,1731037815073-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T03:50:15,672 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,672 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,672 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,46863,1731037815116-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T03:50:15,673 INFO [RS:0;350fccc412b5:37435 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-08T03:50:15,673 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,673 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,673 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,673 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,673 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,673 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,674 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/350fccc412b5:0, corePoolSize=2, maxPoolSize=2 2024-11-08T03:50:15,674 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,674 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,674 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 
2024-11-08T03:50:15,674 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,674 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,674 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/350fccc412b5:0, corePoolSize=1, maxPoolSize=1 2024-11-08T03:50:15,674 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:15,674 DEBUG [RS:0;350fccc412b5:37435 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0, corePoolSize=3, maxPoolSize=3 2024-11-08T03:50:15,676 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,676 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,676 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,676 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,677 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,677 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,37435,1731037815030-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T03:50:15,686 INFO [RS:2;350fccc412b5:46863 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T03:50:15,686 INFO [RS:1;350fccc412b5:44007 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T03:50:15,686 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,44007,1731037815073-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,686 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,46863,1731037815116-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,687 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,687 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-08T03:50:15,687 INFO [RS:2;350fccc412b5:46863 {}] regionserver.Replication(171): 350fccc412b5,46863,1731037815116 started 2024-11-08T03:50:15,687 INFO [RS:1;350fccc412b5:44007 {}] regionserver.Replication(171): 350fccc412b5,44007,1731037815073 started 2024-11-08T03:50:15,690 INFO [RS:0;350fccc412b5:37435 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-08T03:50:15,690 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,37435,1731037815030-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,690 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,690 INFO [RS:0;350fccc412b5:37435 {}] regionserver.Replication(171): 350fccc412b5,37435,1731037815030 started 2024-11-08T03:50:15,692 WARN [350fccc412b5:42065 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-08T03:50:15,700 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,700 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,700 INFO [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(1482): Serving as 350fccc412b5,44007,1731037815073, RpcServer on 350fccc412b5/172.17.0.3:44007, sessionid=0x1011890dbcf0002 2024-11-08T03:50:15,700 INFO [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer(1482): Serving as 350fccc412b5,46863,1731037815116, RpcServer on 350fccc412b5/172.17.0.3:46863, sessionid=0x1011890dbcf0003 2024-11-08T03:50:15,701 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T03:50:15,701 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T03:50:15,701 DEBUG [RS:1;350fccc412b5:44007 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 350fccc412b5,44007,1731037815073 2024-11-08T03:50:15,701 DEBUG [RS:2;350fccc412b5:46863 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 350fccc412b5,46863,1731037815116 2024-11-08T03:50:15,701 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,44007,1731037815073' 2024-11-08T03:50:15,701 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,46863,1731037815116' 2024-11-08T03:50:15,701 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T03:50:15,701 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T03:50:15,701 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T03:50:15,701 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T03:50:15,702 DEBUG [RS:1;350fccc412b5:44007 {}] 
procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T03:50:15,702 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T03:50:15,702 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T03:50:15,702 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T03:50:15,702 DEBUG [RS:2;350fccc412b5:46863 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 350fccc412b5,46863,1731037815116 2024-11-08T03:50:15,702 DEBUG [RS:1;350fccc412b5:44007 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 350fccc412b5,44007,1731037815073 2024-11-08T03:50:15,702 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,46863,1731037815116' 2024-11-08T03:50:15,702 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,44007,1731037815073' 2024-11-08T03:50:15,702 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T03:50:15,702 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T03:50:15,704 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:15,704 INFO [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer(1482): Serving as 350fccc412b5,37435,1731037815030, RpcServer on 350fccc412b5/172.17.0.3:37435, sessionid=0x1011890dbcf0001 2024-11-08T03:50:15,704 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-08T03:50:15,704 DEBUG [RS:0;350fccc412b5:37435 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 350fccc412b5,37435,1731037815030 2024-11-08T03:50:15,704 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T03:50:15,704 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T03:50:15,704 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,37435,1731037815030' 2024-11-08T03:50:15,704 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-08T03:50:15,705 DEBUG [RS:1;350fccc412b5:44007 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T03:50:15,705 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-08T03:50:15,705 INFO [RS:1;350fccc412b5:44007 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T03:50:15,705 DEBUG [RS:2;350fccc412b5:46863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T03:50:15,705 INFO [RS:1;350fccc412b5:44007 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-08T03:50:15,705 INFO [RS:2;350fccc412b5:46863 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T03:50:15,705 INFO [RS:2;350fccc412b5:46863 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T03:50:15,705 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-08T03:50:15,705 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-08T03:50:15,705 DEBUG [RS:0;350fccc412b5:37435 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 350fccc412b5,37435,1731037815030 2024-11-08T03:50:15,705 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '350fccc412b5,37435,1731037815030' 2024-11-08T03:50:15,705 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-08T03:50:15,705 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-08T03:50:15,706 DEBUG [RS:0;350fccc412b5:37435 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-08T03:50:15,706 INFO [RS:0;350fccc412b5:37435 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-08T03:50:15,706 INFO [RS:0;350fccc412b5:37435 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-08T03:50:15,808 INFO [RS:1;350fccc412b5:44007 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=350fccc412b5%2C44007%2C1731037815073, suffix=, logDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/WALs/350fccc412b5,44007,1731037815073, archiveDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/oldWALs, maxLogs=32 2024-11-08T03:50:15,808 INFO [RS:2;350fccc412b5:46863 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=350fccc412b5%2C46863%2C1731037815116, suffix=, logDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/WALs/350fccc412b5,46863,1731037815116, archiveDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/oldWALs, maxLogs=32 2024-11-08T03:50:15,809 INFO [RS:0;350fccc412b5:37435 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=350fccc412b5%2C37435%2C1731037815030, suffix=, logDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/WALs/350fccc412b5,37435,1731037815030, archiveDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/oldWALs, maxLogs=32 2024-11-08T03:50:15,811 INFO [RS:2;350fccc412b5:46863 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 350fccc412b5%2C46863%2C1731037815116.1731037815811 2024-11-08T03:50:15,812 INFO [RS:1;350fccc412b5:44007 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 350fccc412b5%2C44007%2C1731037815073.1731037815812 2024-11-08T03:50:15,813 INFO [RS:0;350fccc412b5:37435 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 350fccc412b5%2C37435%2C1731037815030.1731037815812 2024-11-08T03:50:15,825 INFO [RS:2;350fccc412b5:46863 {}] 
wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/WALs/350fccc412b5,46863,1731037815116/350fccc412b5%2C46863%2C1731037815116.1731037815811 2024-11-08T03:50:15,826 INFO [RS:1;350fccc412b5:44007 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/WALs/350fccc412b5,44007,1731037815073/350fccc412b5%2C44007%2C1731037815073.1731037815812 2024-11-08T03:50:15,828 INFO [RS:0;350fccc412b5:37435 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/WALs/350fccc412b5,37435,1731037815030/350fccc412b5%2C37435%2C1731037815030.1731037815812 2024-11-08T03:50:15,829 DEBUG [RS:2;350fccc412b5:46863 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44245:44245),(127.0.0.1/127.0.0.1:38105:38105),(127.0.0.1/127.0.0.1:35259:35259)] 2024-11-08T03:50:15,829 DEBUG [RS:1;350fccc412b5:44007 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35259:35259),(127.0.0.1/127.0.0.1:38105:38105),(127.0.0.1/127.0.0.1:44245:44245)] 2024-11-08T03:50:15,830 DEBUG [RS:0;350fccc412b5:37435 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44245:44245),(127.0.0.1/127.0.0.1:38105:38105),(127.0.0.1/127.0.0.1:35259:35259)] 2024-11-08T03:50:15,943 DEBUG [350fccc412b5:42065 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-08T03:50:15,943 DEBUG [350fccc412b5:42065 {}] balancer.BalancerClusterState(204): Hosts are {350fccc412b5=0} racks are {/default-rack=0} 2024-11-08T03:50:15,947 DEBUG [350fccc412b5:42065 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T03:50:15,947 DEBUG [350fccc412b5:42065 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T03:50:15,947 DEBUG [350fccc412b5:42065 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-08T03:50:15,947 DEBUG [350fccc412b5:42065 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T03:50:15,947 DEBUG [350fccc412b5:42065 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T03:50:15,947 DEBUG [350fccc412b5:42065 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-08T03:50:15,947 INFO [350fccc412b5:42065 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T03:50:15,947 INFO [350fccc412b5:42065 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T03:50:15,948 INFO [350fccc412b5:42065 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-08T03:50:15,948 DEBUG [350fccc412b5:42065 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T03:50:15,948 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=350fccc412b5,44007,1731037815073 2024-11-08T03:50:15,951 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 350fccc412b5,44007,1731037815073, state=OPENING 2024-11-08T03:50:15,966 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-08T03:50:15,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase 2024-11-08T03:50:15,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,977 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:15,979 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-08T03:50:15,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:15,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:15,979 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:15,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=350fccc412b5,44007,1731037815073}] 2024-11-08T03:50:15,980 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:16,137 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-08T03:50:16,141 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:58587, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-08T03:50:16,149 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-08T03:50:16,149 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-08T03:50:16,152 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=350fccc412b5%2C44007%2C1731037815073.meta, suffix=.meta, logDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/WALs/350fccc412b5,44007,1731037815073, archiveDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/oldWALs, maxLogs=32 2024-11-08T03:50:16,153 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 350fccc412b5%2C44007%2C1731037815073.meta.1731037816153.meta 2024-11-08T03:50:16,161 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/WALs/350fccc412b5,44007,1731037815073/350fccc412b5%2C44007%2C1731037815073.meta.1731037816153.meta 2024-11-08T03:50:16,164 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44245:44245),(127.0.0.1/127.0.0.1:35259:35259),(127.0.0.1/127.0.0.1:38105:38105)] 2024-11-08T03:50:16,165 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-08T03:50:16,165 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-08T03:50:16,165 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-08T03:50:16,166 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-08T03:50:16,166 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-08T03:50:16,166 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:16,166 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-08T03:50:16,166 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-08T03:50:16,168 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-08T03:50:16,169 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-08T03:50:16,169 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:16,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:16,170 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-08T03:50:16,171 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-08T03:50:16,171 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:16,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:16,172 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-08T03:50:16,173 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-08T03:50:16,173 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:16,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:16,174 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-08T03:50:16,175 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-08T03:50:16,175 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:16,175 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-08T03:50:16,176 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-08T03:50:16,176 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740 2024-11-08T03:50:16,178 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740 2024-11-08T03:50:16,179 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-08T03:50:16,179 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-08T03:50:16,180 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
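The CompactionConfiguration lines printed while opening each column family of 1588230740 correspond to the standard compaction tuning properties. A minimal sketch, assuming the usual key names (hbase.hstore.compaction.min/max/ratio plus the major-compaction period and jitter) that map to the values shown in the log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionPolicyKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Selection bounds matching the logged [minFilesToCompact:3, maxFilesToCompact:10).
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // File-size ratios: 1.2 normally, 5.0 off-peak, as in the logged configuration.
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // Major compaction every 7 days with 50% jitter (604800000 ms and 0.5 in the log).
    conf.setLong("hbase.hregion.majorcompaction", 7L * 24 * 60 * 60 * 1000);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
    System.out.println(conf.getInt("hbase.hstore.compaction.min", -1));
  }
}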
2024-11-08T03:50:16,181 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-08T03:50:16,183 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64965108, jitterRate=-0.031944453716278076}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-08T03:50:16,183 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-08T03:50:16,184 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731037816166Writing region info on filesystem at 1731037816166Initializing all the Stores at 1731037816167 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037816167Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037816168 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037816168Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731037816168Cleaning up temporary data from old regions at 1731037816179 (+11 ms)Running coprocessor post-open hooks at 1731037816183 (+4 ms)Region opened successfully at 1731037816184 (+1 ms) 2024-11-08T03:50:16,186 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731037816137 2024-11-08T03:50:16,189 DEBUG [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-08T03:50:16,189 INFO [RS_OPEN_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-08T03:50:16,190 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=350fccc412b5,44007,1731037815073 2024-11-08T03:50:16,192 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 350fccc412b5,44007,1731037815073, state=OPEN 2024-11-08T03:50:16,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T03:50:16,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T03:50:16,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T03:50:16,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-08T03:50:16,198 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=350fccc412b5,44007,1731037815073 2024-11-08T03:50:16,198 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:16,198 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:16,198 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:16,198 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-08T03:50:16,203 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-08T03:50:16,203 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=350fccc412b5,44007,1731037815073 in 219 msec 2024-11-08T03:50:16,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-08T03:50:16,207 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 664 msec 2024-11-08T03:50:16,209 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-08T03:50:16,209 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-08T03:50:16,210 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T03:50:16,211 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=350fccc412b5,44007,1731037815073, seqNum=-1] 2024-11-08T03:50:16,211 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T03:50:16,212 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:39211, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T03:50:16,221 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 732 msec 2024-11-08T03:50:16,222 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731037816221, completionTime=-1 2024-11-08T03:50:16,222 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-08T03:50:16,222 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-08T03:50:16,224 INFO [master/350fccc412b5:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-08T03:50:16,224 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731037876224 2024-11-08T03:50:16,224 INFO [master/350fccc412b5:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731037936224 2024-11-08T03:50:16,224 INFO [master/350fccc412b5:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-08T03:50:16,225 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,42065,1731037814849-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:16,225 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,42065,1731037814849-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:16,225 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,42065,1731037814849-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:16,225 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-350fccc412b5:42065, period=300000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:16,225 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-08T03:50:16,226 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-11-08T03:50:16,228 DEBUG [master/350fccc412b5:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-08T03:50:16,231 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.042sec 2024-11-08T03:50:16,231 INFO [master/350fccc412b5:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-08T03:50:16,231 INFO [master/350fccc412b5:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-08T03:50:16,231 INFO [master/350fccc412b5:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-08T03:50:16,231 INFO [master/350fccc412b5:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-08T03:50:16,231 INFO [master/350fccc412b5:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-08T03:50:16,231 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,42065,1731037814849-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-08T03:50:16,231 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,42065,1731037814849-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-08T03:50:16,235 DEBUG [master/350fccc412b5:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-08T03:50:16,235 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-08T03:50:16,235 INFO [master/350fccc412b5:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=350fccc412b5,42065,1731037814849-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
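The lines above show the active master finishing initialization (1.042 sec) and scheduling its periodic chores, so the minicluster is essentially up at this point. For context, a minimal sketch of how a test such as TestHBaseWalOnEC would start the three-region-server minicluster behind these logs, using the HBaseTestingUtil class that appears in the call stacks later in this log; startMiniCluster(3) and the surrounding test wiring are assumptions, not the test's actual source:

    import org.apache.hadoop.hbase.HBaseTestingUtil;

    // Shared testing utility: boots an in-process HDFS, ZooKeeper quorum and HBase cluster.
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Three region servers, matching "Finished waiting on RegionServer count=3" logged above.
    util.startMiniCluster(3);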
2024-11-08T03:50:16,244 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24105fcd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T03:50:16,244 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 350fccc412b5,42065,-1 for getting cluster id 2024-11-08T03:50:16,245 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-08T03:50:16,246 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a24fbbca-ae59-4d2b-a9b0-5ba7f8c043d5' 2024-11-08T03:50:16,247 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-08T03:50:16,247 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a24fbbca-ae59-4d2b-a9b0-5ba7f8c043d5" 2024-11-08T03:50:16,247 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b203bfc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T03:50:16,248 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [350fccc412b5,42065,-1] 2024-11-08T03:50:16,248 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-08T03:50:16,248 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:16,250 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36892, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-08T03:50:16,252 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1253147a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-08T03:50:16,252 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-08T03:50:16,254 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=350fccc412b5,44007,1731037815073, seqNum=-1] 2024-11-08T03:50:16,254 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-08T03:50:16,257 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60434, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-08T03:50:16,260 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=350fccc412b5,42065,1731037814849 2024-11-08T03:50:16,261 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-08T03:50:16,263 DEBUG 
[RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is 350fccc412b5,42065,1731037814849 2024-11-08T03:50:16,263 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5722c3a7 2024-11-08T03:50:16,263 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-08T03:50:16,265 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:36894, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-08T03:50:16,266 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.3 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-08T03:50:16,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-08T03:50:16,270 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-08T03:50:16,270 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:16,270 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-08T03:50:16,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T03:50:16,272 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-08T03:50:16,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741837_1013 (size=392) 2024-11-08T03:50:16,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741837_1013 (size=392) 2024-11-08T03:50:16,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741837_1013 (size=392) 2024-11-08T03:50:16,284 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b14711407486c21d92d1f84e3cb4763e, NAME => 'TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1 2024-11-08T03:50:16,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741838_1014 (size=51) 2024-11-08T03:50:16,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741838_1014 (size=51) 2024-11-08T03:50:16,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741838_1014 (size=51) 2024-11-08T03:50:16,295 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:16,295 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing b14711407486c21d92d1f84e3cb4763e, disabling compactions & flushes 2024-11-08T03:50:16,295 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 2024-11-08T03:50:16,295 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 2024-11-08T03:50:16,295 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. after waiting 0 ms 2024-11-08T03:50:16,295 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 2024-11-08T03:50:16,295 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 2024-11-08T03:50:16,295 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for b14711407486c21d92d1f84e3cb4763e: Waiting for close lock at 1731037816295Disabling compacts and flushes for region at 1731037816295Disabling writes for close at 1731037816295Writing region close event to WAL at 1731037816295Closed at 1731037816295 2024-11-08T03:50:16,298 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-08T03:50:16,298 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731037816298"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731037816298"}]},"ts":"1731037816298"} 2024-11-08T03:50:16,302 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
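Above, pid=4 (CreateTableProcedure) has written the table's filesystem layout and added the new region b14711407486c21d92d1f84e3cb4763e to hbase:meta. A hedged sketch of the client call that triggers this, rebuilding the 'TestHBaseWalOnEC' descriptor with its single 'cf' family (VERSIONS => '1') from the create request logged above; the builder methods are standard HBase client API, while util (the HBaseTestingUtil instance from the earlier sketch) and util.getConnection() are assumptions about how the test obtains its Admin:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    TableName tableName = TableName.valueOf("TestHBaseWalOnEC");
    try (Admin admin = util.getConnection().getAdmin()) {
      // Produces the "create 'TestHBaseWalOnEC', ... {NAME => 'cf', VERSIONS => '1', ...}" request above.
      admin.createTable(TableDescriptorBuilder.newBuilder(tableName)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build());
    }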
2024-11-08T03:50:16,303 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-08T03:50:16,304 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731037816303"}]},"ts":"1731037816303"} 2024-11-08T03:50:16,307 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-08T03:50:16,307 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {350fccc412b5=0} racks are {/default-rack=0} 2024-11-08T03:50:16,308 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-08T03:50:16,309 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-08T03:50:16,309 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-08T03:50:16,309 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-08T03:50:16,309 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-08T03:50:16,309 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-08T03:50:16,309 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-08T03:50:16,309 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-08T03:50:16,309 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-08T03:50:16,309 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-08T03:50:16,309 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b14711407486c21d92d1f84e3cb4763e, ASSIGN}] 2024-11-08T03:50:16,311 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b14711407486c21d92d1f84e3cb4763e, ASSIGN 2024-11-08T03:50:16,313 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b14711407486c21d92d1f84e3cb4763e, ASSIGN; state=OFFLINE, location=350fccc412b5,44007,1731037815073; forceNewPlan=false, retain=false 2024-11-08T03:50:16,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T03:50:16,455 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-08T03:50:16,455 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-08T03:50:16,457 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T03:50:16,457 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-08T03:50:16,458 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-08T03:50:16,458 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-08T03:50:16,464 INFO [350fccc412b5:42065 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-08T03:50:16,464 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b14711407486c21d92d1f84e3cb4763e, regionState=OPENING, regionLocation=350fccc412b5,44007,1731037815073 2024-11-08T03:50:16,468 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b14711407486c21d92d1f84e3cb4763e, ASSIGN because future has completed 2024-11-08T03:50:16,468 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b14711407486c21d92d1f84e3cb4763e, server=350fccc412b5,44007,1731037815073}] 2024-11-08T03:50:16,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T03:50:16,630 INFO [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 
2024-11-08T03:50:16,631 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b14711407486c21d92d1f84e3cb4763e, NAME => 'TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e.', STARTKEY => '', ENDKEY => ''} 2024-11-08T03:50:16,631 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:16,631 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-08T03:50:16,632 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:16,632 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:16,634 INFO [StoreOpener-b14711407486c21d92d1f84e3cb4763e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:16,636 INFO [StoreOpener-b14711407486c21d92d1f84e3cb4763e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b14711407486c21d92d1f84e3cb4763e columnFamilyName cf 2024-11-08T03:50:16,636 DEBUG [StoreOpener-b14711407486c21d92d1f84e3cb4763e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-08T03:50:16,637 INFO [StoreOpener-b14711407486c21d92d1f84e3cb4763e-1 {}] regionserver.HStore(327): Store=b14711407486c21d92d1f84e3cb4763e/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-08T03:50:16,637 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:16,638 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/default/TestHBaseWalOnEC/b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:16,638 DEBUG 
[RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/default/TestHBaseWalOnEC/b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:16,639 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:16,639 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:16,641 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:16,644 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/default/TestHBaseWalOnEC/b14711407486c21d92d1f84e3cb4763e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-08T03:50:16,644 INFO [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b14711407486c21d92d1f84e3cb4763e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59568318, jitterRate=-0.11236289143562317}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-08T03:50:16,645 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:16,645 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b14711407486c21d92d1f84e3cb4763e: Running coprocessor pre-open hook at 1731037816632Writing region info on filesystem at 1731037816632Initializing all the Stores at 1731037816634 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731037816634Cleaning up temporary data from old regions at 1731037816639 (+5 ms)Running coprocessor post-open hooks at 1731037816645 (+6 ms)Region opened successfully at 1731037816645 2024-11-08T03:50:16,647 INFO [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e., pid=6, masterSystemTime=1731037816622 2024-11-08T03:50:16,650 DEBUG [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 2024-11-08T03:50:16,650 INFO [RS_OPEN_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 
2024-11-08T03:50:16,651 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b14711407486c21d92d1f84e3cb4763e, regionState=OPEN, openSeqNum=2, regionLocation=350fccc412b5,44007,1731037815073 2024-11-08T03:50:16,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b14711407486c21d92d1f84e3cb4763e, server=350fccc412b5,44007,1731037815073 because future has completed 2024-11-08T03:50:16,662 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-08T03:50:16,663 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b14711407486c21d92d1f84e3cb4763e, server=350fccc412b5,44007,1731037815073 in 189 msec 2024-11-08T03:50:16,666 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-08T03:50:16,666 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b14711407486c21d92d1f84e3cb4763e, ASSIGN in 353 msec 2024-11-08T03:50:16,668 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-08T03:50:16,668 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731037816668"}]},"ts":"1731037816668"} 2024-11-08T03:50:16,671 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-08T03:50:16,673 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-08T03:50:16,676 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 407 msec 2024-11-08T03:50:16,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-08T03:50:16,895 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-08T03:50:16,895 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-08T03:50:16,896 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T03:50:16,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-08T03:50:16,900 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-08T03:50:16,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
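The HBaseTestingUtil messages just above ("Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms" through "All regions for table TestHBaseWalOnEC assigned.") correspond to the test blocking until the new region is online before it writes. A sketch of that wait, assuming the waitUntilAllRegionsAssigned helper on the same testing utility:

    import org.apache.hadoop.hbase.TableName;

    // Blocks (default timeout 60,000 ms) until every region of the table is reported
    // as assigned in hbase:meta and by the master's AssignmentManager.
    util.waitUntilAllRegionsAssigned(TableName.valueOf("TestHBaseWalOnEC"));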
2024-11-08T03:50:16,905 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e., hostname=350fccc412b5,44007,1731037815073, seqNum=2] 2024-11-08T03:50:16,910 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.3 flush TestHBaseWalOnEC 2024-11-08T03:50:16,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-08T03:50:16,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T03:50:16,914 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-08T03:50:16,916 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-08T03:50:16,917 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-08T03:50:17,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T03:50:17,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44007 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-08T03:50:17,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 
2024-11-08T03:50:17,078 INFO [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing b14711407486c21d92d1f84e3cb4763e 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-08T03:50:17,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/default/TestHBaseWalOnEC/b14711407486c21d92d1f84e3cb4763e/.tmp/cf/fe4b70003c88437fac0726cd4a59745f is 36, key is row/cf:cq/1731037816907/Put/seqid=0 2024-11-08T03:50:17,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741839_1015 (size=4787) 2024-11-08T03:50:17,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741839_1015 (size=4787) 2024-11-08T03:50:17,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741839_1015 (size=4787) 2024-11-08T03:50:17,104 INFO [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/default/TestHBaseWalOnEC/b14711407486c21d92d1f84e3cb4763e/.tmp/cf/fe4b70003c88437fac0726cd4a59745f 2024-11-08T03:50:17,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/default/TestHBaseWalOnEC/b14711407486c21d92d1f84e3cb4763e/.tmp/cf/fe4b70003c88437fac0726cd4a59745f as hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/default/TestHBaseWalOnEC/b14711407486c21d92d1f84e3cb4763e/cf/fe4b70003c88437fac0726cd4a59745f 2024-11-08T03:50:17,121 INFO [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/default/TestHBaseWalOnEC/b14711407486c21d92d1f84e3cb4763e/cf/fe4b70003c88437fac0726cd4a59745f, entries=1, sequenceid=5, filesize=4.7 K 2024-11-08T03:50:17,123 INFO [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for b14711407486c21d92d1f84e3cb4763e in 45ms, sequenceid=5, compaction requested=false 2024-11-08T03:50:17,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for b14711407486c21d92d1f84e3cb4763e: 2024-11-08T03:50:17,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 
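The flush above persists a single 32-byte cell, keyed row/cf:cq, into HFile fe4b70003c88437fac0726cd4a59745f under the region's cf directory. A minimal sketch of the write-then-flush sequence that would produce it; Put, Table and Admin.flush are standard client APIs, while the cell value and the util.getConnection()/getAdmin() accessors are assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    TableName tableName = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = util.getConnection().getTable(tableName)) {
      // Writes the cell logged as "key is row/cf:cq" by the HFile writer above.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
    }
    // Flush request; the master drives it through FlushTableProcedure (pid=7) and
    // FlushRegionProcedure (pid=8), as logged above.
    util.getAdmin().flush(tableName);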
2024-11-08T03:50:17,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/350fccc412b5:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-08T03:50:17,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-08T03:50:17,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-08T03:50:17,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 210 msec 2024-11-08T03:50:17,133 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 221 msec 2024-11-08T03:50:17,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42065 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-08T03:50:17,236 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-08T03:50:17,243 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-08T03:50:17,244 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T03:50:17,244 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T03:50:17,244 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:17,245 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:17,245 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-08T03:50:17,245 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-08T03:50:17,245 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=333692838, stopped=false 2024-11-08T03:50:17,246 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=350fccc412b5,42065,1731037814849 2024-11-08T03:50:17,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:17,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:17,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:17,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-08T03:50:17,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:17,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, 
quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:17,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:17,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:17,314 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T03:50:17,315 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-08T03:50:17,316 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T03:50:17,316 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:17,316 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:17,316 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:17,316 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:17,316 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '350fccc412b5,37435,1731037815030' ***** 2024-11-08T03:50:17,316 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T03:50:17,317 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '350fccc412b5,44007,1731037815073' ***** 2024-11-08T03:50:17,317 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T03:50:17,317 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-08T03:50:17,317 INFO [RS:0;350fccc412b5:37435 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T03:50:17,317 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '350fccc412b5,46863,1731037815116' ***** 2024-11-08T03:50:17,317 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-08T03:50:17,317 INFO [RS:0;350fccc412b5:37435 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T03:50:17,317 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T03:50:17,317 INFO [RS:0;350fccc412b5:37435 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
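From this point the log records the minicluster teardown: the master deletes /hbase/running, every ZKWatcher sees the NodeDeleted event, and each region server starts stopping its flush and snapshot managers before closing its regions. The call stacks above show the shutdown originating in TestHBaseWalOnEC.tearDown via HBaseTestingUtil.shutdownMiniCluster; a sketch of that teardown (the @After placement is inferred from the JUnit frames, not taken from the test source):

    import org.junit.After;

    @After
    public void tearDown() throws Exception {
      // Closes the shared connection, stops the master and region servers,
      // then the backing mini DFS and ZooKeeper, as in the stack traces above.
      util.shutdownMiniCluster();
    }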
2024-11-08T03:50:17,317 INFO [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer(959): stopping server 350fccc412b5,37435,1731037815030 2024-11-08T03:50:17,318 INFO [RS:0;350fccc412b5:37435 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T03:50:17,318 INFO [RS:2;350fccc412b5:46863 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T03:50:17,318 INFO [RS:0;350fccc412b5:37435 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;350fccc412b5:37435. 2024-11-08T03:50:17,318 INFO [RS:2;350fccc412b5:46863 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-08T03:50:17,318 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T03:50:17,318 INFO [RS:2;350fccc412b5:46863 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T03:50:17,318 DEBUG [RS:0;350fccc412b5:37435 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T03:50:17,318 INFO [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer(959): stopping server 350fccc412b5,46863,1731037815116 2024-11-08T03:50:17,318 DEBUG [RS:0;350fccc412b5:37435 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:17,318 INFO [RS:2;350fccc412b5:46863 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T03:50:17,319 INFO [RS:2;350fccc412b5:46863 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;350fccc412b5:46863. 2024-11-08T03:50:17,319 INFO [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer(976): stopping server 350fccc412b5,37435,1731037815030; all regions closed. 2024-11-08T03:50:17,319 INFO [RS:1;350fccc412b5:44007 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-08T03:50:17,319 INFO [RS:1;350fccc412b5:44007 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-08T03:50:17,319 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-08T03:50:17,319 DEBUG [RS:2;350fccc412b5:46863 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T03:50:17,319 INFO [RS:1;350fccc412b5:44007 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-08T03:50:17,319 DEBUG [RS:2;350fccc412b5:46863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:17,319 INFO [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(3091): Received CLOSE for b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:17,319 INFO [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer(976): stopping server 350fccc412b5,46863,1731037815116; all regions closed. 2024-11-08T03:50:17,320 INFO [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(959): stopping server 350fccc412b5,44007,1731037815073 2024-11-08T03:50:17,320 INFO [RS:1;350fccc412b5:44007 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T03:50:17,320 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,320 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,320 INFO [RS:1;350fccc412b5:44007 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;350fccc412b5:44007. 2024-11-08T03:50:17,320 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b14711407486c21d92d1f84e3cb4763e, disabling compactions & flushes 2024-11-08T03:50:17,320 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,320 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,320 INFO [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 
2024-11-08T03:50:17,320 DEBUG [RS:1;350fccc412b5:44007 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-08T03:50:17,320 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 2024-11-08T03:50:17,320 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,320 DEBUG [RS:1;350fccc412b5:44007 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:17,320 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. after waiting 0 ms 2024-11-08T03:50:17,321 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,321 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 2024-11-08T03:50:17,321 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,321 INFO [RS:1;350fccc412b5:44007 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T03:50:17,321 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,321 INFO [RS:1;350fccc412b5:44007 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T03:50:17,321 INFO [RS:1;350fccc412b5:44007 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
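The RS_CLOSE_REGION entries above trace the close handshake for region b14711407486c21d92d1f84e3cb4763e: a time-limited wait for the close lock, acquisition after 0 ms, then "Updates disabled for region". A minimal sketch of that pattern follows, built on a plain ReentrantReadWriteLock; the class and method names are hypothetical, and this is only an illustration of the locking shape the log records, not HBase's HRegion code.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    // Illustrative time-limited "close lock" handshake, similar in spirit to the
    // close journal logged above. Hypothetical sketch, not HBase's implementation.
    public class CloseLockSketch {
        private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
        private volatile boolean writesDisabled = false;

        // Writers share the read side of the lock so they can run concurrently.
        public void write(Runnable mutation) {
            if (writesDisabled) {
                throw new IllegalStateException("region is closing");
            }
            closeLock.readLock().lock();
            try {
                mutation.run();
            } finally {
                closeLock.readLock().unlock();
            }
        }

        // Close takes the write side, waiting at most the given timeout
        // ("Time limited wait for close lock ... after waiting 0 ms").
        public boolean close(long timeout, TimeUnit unit) throws InterruptedException {
            if (!closeLock.writeLock().tryLock(timeout, unit)) {
                return false; // writers could not be quiesced in time
            }
            try {
                writesDisabled = true; // "Updates disabled for region ..."
                // flush memstores and write the region close event to the WAL here
                return true;
            } finally {
                closeLock.writeLock().unlock();
            }
        }
    }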
2024-11-08T03:50:17,321 INFO [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-08T03:50:17,323 INFO [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-08T03:50:17,324 DEBUG [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, b14711407486c21d92d1f84e3cb4763e=TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e.} 2024-11-08T03:50:17,324 DEBUG [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, b14711407486c21d92d1f84e3cb4763e 2024-11-08T03:50:17,324 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-08T03:50:17,324 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-08T03:50:17,324 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-08T03:50:17,324 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-08T03:50:17,324 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-08T03:50:17,324 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-08T03:50:17,325 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,325 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741833_1009 (size=93) 2024-11-08T03:50:17,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741833_1009 (size=93) 2024-11-08T03:50:17,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741833_1009 (size=93) 2024-11-08T03:50:17,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741835_1011 (size=93) 2024-11-08T03:50:17,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741835_1011 (size=93) 2024-11-08T03:50:17,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741835_1011 (size=93) 2024-11-08T03:50:17,330 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/default/TestHBaseWalOnEC/b14711407486c21d92d1f84e3cb4763e/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-08T03:50:17,331 DEBUG [RS:2;350fccc412b5:46863 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/oldWALs 2024-11-08T03:50:17,331 INFO 
[RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 2024-11-08T03:50:17,331 INFO [RS:2;350fccc412b5:46863 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 350fccc412b5%2C46863%2C1731037815116:(num 1731037815811) 2024-11-08T03:50:17,331 DEBUG [RS:2;350fccc412b5:46863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:17,331 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b14711407486c21d92d1f84e3cb4763e: Waiting for close lock at 1731037817320Running coprocessor pre-close hooks at 1731037817320Disabling compacts and flushes for region at 1731037817320Disabling writes for close at 1731037817321 (+1 ms)Writing region close event to WAL at 1731037817324 (+3 ms)Running coprocessor post-close hooks at 1731037817331 (+7 ms)Closed at 1731037817331 2024-11-08T03:50:17,331 INFO [RS:2;350fccc412b5:46863 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:17,331 INFO [RS:2;350fccc412b5:46863 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T03:50:17,331 DEBUG [RS_CLOSE_REGION-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e. 2024-11-08T03:50:17,331 INFO [RS:2;350fccc412b5:46863 {}] hbase.ChoreService(370): Chore service for: regionserver/350fccc412b5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T03:50:17,332 INFO [RS:2;350fccc412b5:46863 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T03:50:17,332 INFO [RS:2;350fccc412b5:46863 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T03:50:17,332 INFO [regionserver/350fccc412b5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T03:50:17,332 INFO [RS:2;350fccc412b5:46863 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-08T03:50:17,332 INFO [RS:2;350fccc412b5:46863 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T03:50:17,332 INFO [RS:2;350fccc412b5:46863 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:46863 2024-11-08T03:50:17,334 DEBUG [RS:0;350fccc412b5:37435 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/oldWALs 2024-11-08T03:50:17,335 INFO [RS:0;350fccc412b5:37435 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 350fccc412b5%2C37435%2C1731037815030:(num 1731037815812) 2024-11-08T03:50:17,335 DEBUG [RS:0;350fccc412b5:37435 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:17,335 INFO [RS:0;350fccc412b5:37435 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:17,335 INFO [RS:0;350fccc412b5:37435 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T03:50:17,335 INFO [RS:0;350fccc412b5:37435 {}] hbase.ChoreService(370): Chore service for: regionserver/350fccc412b5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T03:50:17,335 INFO [RS:0;350fccc412b5:37435 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-08T03:50:17,335 INFO [regionserver/350fccc412b5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T03:50:17,335 INFO [RS:0;350fccc412b5:37435 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-08T03:50:17,335 INFO [RS:0;350fccc412b5:37435 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-08T03:50:17,335 INFO [RS:0;350fccc412b5:37435 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T03:50:17,335 INFO [RS:0;350fccc412b5:37435 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:37435 2024-11-08T03:50:17,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/350fccc412b5,46863,1731037815116 2024-11-08T03:50:17,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T03:50:17,343 INFO [RS:2;350fccc412b5:46863 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T03:50:17,343 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$377/0x00007f0fd88f70f0@6df3b5e2 rejected from java.util.concurrent.ThreadPoolExecutor@28f12638[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-08T03:50:17,347 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/.tmp/info/54eec5eeab074f5294065ee49e34e060 is 153, key is TestHBaseWalOnEC,,1731037816266.b14711407486c21d92d1f84e3cb4763e./info:regioninfo/1731037816651/Put/seqid=0 2024-11-08T03:50:17,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/350fccc412b5,37435,1731037815030 2024-11-08T03:50:17,353 INFO [RS:0;350fccc412b5:37435 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T03:50:17,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741840_1016 (size=6637) 2024-11-08T03:50:17,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741840_1016 (size=6637) 2024-11-08T03:50:17,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741840_1016 (size=6637) 2024-11-08T03:50:17,355 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/.tmp/info/54eec5eeab074f5294065ee49e34e060 2024-11-08T03:50:17,364 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [350fccc412b5,37435,1731037815030] 2024-11-08T03:50:17,375 INFO [regionserver/350fccc412b5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:17,378 INFO [regionserver/350fccc412b5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:17,378 INFO [regionserver/350fccc412b5:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:17,385 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/350fccc412b5,37435,1731037815030 already deleted, retry=false 2024-11-08T03:50:17,385 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 350fccc412b5,37435,1731037815030 expired; onlineServers=2 2024-11-08T03:50:17,385 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [350fccc412b5,46863,1731037815116] 2024-11-08T03:50:17,386 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/.tmp/ns/1c34d553c48f4fcf87a3900ffa059cea is 43, key is default/ns:d/1731037816213/Put/seqid=0 2024-11-08T03:50:17,394 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741841_1017 (size=5153) 2024-11-08T03:50:17,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741841_1017 (size=5153) 2024-11-08T03:50:17,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741841_1017 (size=5153) 2024-11-08T03:50:17,395 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/.tmp/ns/1c34d553c48f4fcf87a3900ffa059cea 2024-11-08T03:50:17,398 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/350fccc412b5,46863,1731037815116 already deleted, retry=false 2024-11-08T03:50:17,398 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 350fccc412b5,46863,1731037815116 expired; onlineServers=1 2024-11-08T03:50:17,416 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/.tmp/table/b9f2198d0b3c498ea461ae019ca37754 is 52, key is TestHBaseWalOnEC/table:state/1731037816668/Put/seqid=0 2024-11-08T03:50:17,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741842_1018 (size=5249) 2024-11-08T03:50:17,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741842_1018 (size=5249) 2024-11-08T03:50:17,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741842_1018 (size=5249) 2024-11-08T03:50:17,425 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/.tmp/table/b9f2198d0b3c498ea461ae019ca37754 2024-11-08T03:50:17,432 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/.tmp/info/54eec5eeab074f5294065ee49e34e060 as hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/info/54eec5eeab074f5294065ee49e34e060 2024-11-08T03:50:17,440 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/info/54eec5eeab074f5294065ee49e34e060, entries=10, sequenceid=11, filesize=6.5 K 2024-11-08T03:50:17,441 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/.tmp/ns/1c34d553c48f4fcf87a3900ffa059cea as hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/ns/1c34d553c48f4fcf87a3900ffa059cea 2024-11-08T03:50:17,448 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/ns/1c34d553c48f4fcf87a3900ffa059cea, entries=2, sequenceid=11, filesize=5.0 K 2024-11-08T03:50:17,450 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/.tmp/table/b9f2198d0b3c498ea461ae019ca37754 as hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/table/b9f2198d0b3c498ea461ae019ca37754 2024-11-08T03:50:17,457 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/table/b9f2198d0b3c498ea461ae019ca37754, entries=2, sequenceid=11, filesize=5.1 K 2024-11-08T03:50:17,458 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false 2024-11-08T03:50:17,464 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-08T03:50:17,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:17,464 INFO [RS:2;350fccc412b5:46863 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T03:50:17,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46863-0x1011890dbcf0003, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:17,464 INFO [RS:2;350fccc412b5:46863 {}] regionserver.HRegionServer(1031): Exiting; stopping=350fccc412b5,46863,1731037815116; zookeeper connection closed. 
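The ZooKeeper close events above, together with the earlier ERROR from the Time-limited test-EventThread (RejectedExecutionException: Task ... rejected from java.util.concurrent.ThreadPoolExecutor[Terminated, ...]), show a watcher callback arriving after the executor that dispatches it has already been shut down. The standalone snippet below reproduces that failure mode with a plain ExecutorService; it illustrates the cause of the exception only and is not the ZKWatcher code itself.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.RejectedExecutionException;

    // Once an executor has been shut down, any further submission is rejected;
    // that is the RejectedExecutionException logged by the event thread above.
    public class RejectedAfterShutdownSketch {
        public static void main(String[] args) {
            ExecutorService pool = Executors.newSingleThreadExecutor();
            pool.execute(() -> System.out.println("event handled"));
            pool.shutdown(); // connection closing: no new tasks accepted from here on
            try {
                pool.execute(() -> System.out.println("late watcher event"));
            } catch (RejectedExecutionException e) {
                // A late callback lands here; typical handling is to log and drop it,
                // which is effectively what the ERROR entry above records.
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }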
2024-11-08T03:50:17,464 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1eb48d7b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1eb48d7b 2024-11-08T03:50:17,465 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-08T03:50:17,465 INFO [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-08T03:50:17,465 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731037817324Running coprocessor pre-close hooks at 1731037817324Disabling compacts and flushes for region at 1731037817324Disabling writes for close at 1731037817324Obtaining lock to block concurrent updates at 1731037817324Preparing flush snapshotting stores in 1588230740 at 1731037817324Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731037817325 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731037817327 (+2 ms)Flushing 1588230740/info: creating writer at 1731037817327Flushing 1588230740/info: appending metadata at 1731037817347 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731037817347Flushing 1588230740/ns: creating writer at 1731037817365 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731037817386 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1731037817386Flushing 1588230740/table: creating writer at 1731037817402 (+16 ms)Flushing 1588230740/table: appending metadata at 1731037817416 (+14 ms)Flushing 1588230740/table: closing flushed file at 1731037817416Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ae00dbc: reopening flushed file at 1731037817431 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67cd2564: reopening flushed file at 1731037817440 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@588e6a57: reopening flushed file at 1731037817448 (+8 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false at 1731037817458 (+10 ms)Writing region close event to WAL at 1731037817460 (+2 ms)Running coprocessor post-close hooks at 1731037817465 (+5 ms)Closed at 1731037817465 2024-11-08T03:50:17,465 DEBUG [RS_CLOSE_META-regionserver/350fccc412b5:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-08T03:50:17,475 INFO [RS:0;350fccc412b5:37435 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T03:50:17,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:17,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37435-0x1011890dbcf0001, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:17,475 INFO [RS:0;350fccc412b5:37435 {}] regionserver.HRegionServer(1031): Exiting; stopping=350fccc412b5,37435,1731037815030; zookeeper connection 
closed. 2024-11-08T03:50:17,475 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@46d66b6f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@46d66b6f 2024-11-08T03:50:17,524 INFO [RS:1;350fccc412b5:44007 {}] regionserver.HRegionServer(976): stopping server 350fccc412b5,44007,1731037815073; all regions closed. 2024-11-08T03:50:17,525 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,525 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,525 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,525 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,525 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741836_1012 (size=2751) 2024-11-08T03:50:17,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741836_1012 (size=2751) 2024-11-08T03:50:17,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741836_1012 (size=2751) 2024-11-08T03:50:17,531 DEBUG [RS:1;350fccc412b5:44007 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/oldWALs 2024-11-08T03:50:17,531 INFO [RS:1;350fccc412b5:44007 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 350fccc412b5%2C44007%2C1731037815073.meta:.meta(num 1731037816153) 2024-11-08T03:50:17,531 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,532 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,532 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,532 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,532 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:17,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741834_1010 (size=1298) 2024-11-08T03:50:17,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741834_1010 (size=1298) 2024-11-08T03:50:17,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741834_1010 (size=1298) 2024-11-08T03:50:17,538 DEBUG [RS:1;350fccc412b5:44007 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/oldWALs 2024-11-08T03:50:17,538 INFO [RS:1;350fccc412b5:44007 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 350fccc412b5%2C44007%2C1731037815073:(num 1731037815812) 2024-11-08T03:50:17,538 DEBUG [RS:1;350fccc412b5:44007 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-08T03:50:17,538 INFO [RS:1;350fccc412b5:44007 {}] regionserver.LeaseManager(133): Closed leases 2024-11-08T03:50:17,538 INFO [RS:1;350fccc412b5:44007 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T03:50:17,538 INFO [RS:1;350fccc412b5:44007 {}] hbase.ChoreService(370): Chore service for: regionserver/350fccc412b5:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, 
unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-08T03:50:17,539 INFO [RS:1;350fccc412b5:44007 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T03:50:17,539 INFO [regionserver/350fccc412b5:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-08T03:50:17,539 INFO [RS:1;350fccc412b5:44007 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:44007 2024-11-08T03:50:17,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/350fccc412b5,44007,1731037815073 2024-11-08T03:50:17,545 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-08T03:50:17,545 INFO [RS:1;350fccc412b5:44007 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T03:50:17,556 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [350fccc412b5,44007,1731037815073] 2024-11-08T03:50:17,566 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/350fccc412b5,44007,1731037815073 already deleted, retry=false 2024-11-08T03:50:17,566 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 350fccc412b5,44007,1731037815073 expired; onlineServers=0 2024-11-08T03:50:17,566 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '350fccc412b5,42065,1731037814849' ***** 2024-11-08T03:50:17,567 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-08T03:50:17,567 INFO [M:0;350fccc412b5:42065 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-08T03:50:17,567 INFO [M:0;350fccc412b5:42065 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-08T03:50:17,567 DEBUG [M:0;350fccc412b5:42065 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-08T03:50:17,567 DEBUG [M:0;350fccc412b5:42065 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-08T03:50:17,567 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
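Each region server's "Chore service for: regionserver/350fccc412b5:0 had [ScheduledChore name=..., period=..., unit=MILLISECONDS] on shutdown" entry lists the periodic maintenance tasks cancelled as that server stops. A hedged sketch of that shape with java.util.concurrent is below; the chore names and periods are taken from the log entries, but the scheduling code is illustrative and not HBase's ChoreService.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    // Illustrative periodic chores and their cancellation on shutdown.
    public class ChoreServiceSketch {
        public static void main(String[] args) throws InterruptedException {
            ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);

            // Periods mirror the log: CompactionThroughputTuner every 60 000 ms,
            // ReplicationSource/SinkStatistics every 300 000 ms.
            chores.scheduleAtFixedRate(() -> System.out.println("tune compaction throughput"),
                    60_000, 60_000, TimeUnit.MILLISECONDS);
            chores.scheduleAtFixedRate(() -> System.out.println("report replication statistics"),
                    300_000, 300_000, TimeUnit.MILLISECONDS);

            // On server stop the pending chores are cancelled and the pool drained,
            // which is what the "... on shutdown" entries above correspond to.
            chores.shutdownNow();
            chores.awaitTermination(5, TimeUnit.SECONDS);
        }
    }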
2024-11-08T03:50:17,567 DEBUG [master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.large.0-1731037815493 {}] cleaner.HFileCleaner(306): Exit Thread[master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.large.0-1731037815493,5,FailOnTimeoutGroup] 2024-11-08T03:50:17,567 INFO [M:0;350fccc412b5:42065 {}] hbase.ChoreService(370): Chore service for: master/350fccc412b5:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-08T03:50:17,567 INFO [M:0;350fccc412b5:42065 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-08T03:50:17,568 DEBUG [M:0;350fccc412b5:42065 {}] master.HMaster(1795): Stopping service threads 2024-11-08T03:50:17,568 INFO [M:0;350fccc412b5:42065 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-08T03:50:17,567 DEBUG [master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.small.0-1731037815493 {}] cleaner.HFileCleaner(306): Exit Thread[master/350fccc412b5:0:becomeActiveMaster-HFileCleaner.small.0-1731037815493,5,FailOnTimeoutGroup] 2024-11-08T03:50:17,568 INFO [M:0;350fccc412b5:42065 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-08T03:50:17,568 INFO [M:0;350fccc412b5:42065 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-08T03:50:17,568 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-08T03:50:17,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-08T03:50:17,577 DEBUG [M:0;350fccc412b5:42065 {}] zookeeper.ZKUtil(347): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-08T03:50:17,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-08T03:50:17,577 WARN [M:0;350fccc412b5:42065 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-08T03:50:17,578 INFO [M:0;350fccc412b5:42065 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/.lastflushedseqids 2024-11-08T03:50:17,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741843_1019 (size=127) 2024-11-08T03:50:17,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741843_1019 (size=127) 2024-11-08T03:50:17,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741843_1019 (size=127) 2024-11-08T03:50:17,589 INFO [M:0;350fccc412b5:42065 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-08T03:50:17,589 INFO [M:0;350fccc412b5:42065 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-08T03:50:17,590 DEBUG 
[M:0;350fccc412b5:42065 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-08T03:50:17,590 INFO [M:0;350fccc412b5:42065 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:17,590 DEBUG [M:0;350fccc412b5:42065 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:17,590 DEBUG [M:0;350fccc412b5:42065 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-08T03:50:17,590 DEBUG [M:0;350fccc412b5:42065 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-08T03:50:17,590 INFO [M:0;350fccc412b5:42065 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-08T03:50:17,608 DEBUG [M:0;350fccc412b5:42065 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ff0f179b49d943028cda22a0eed0ae52 is 82, key is hbase:meta,,1/info:regioninfo/1731037816190/Put/seqid=0 2024-11-08T03:50:17,609 WARN [IPC Server handler 3 on default port 38775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-08T03:50:17,610 WARN [IPC Server handler 3 on default port 38775 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-08T03:50:17,610 WARN [IPC Server handler 3 on default port 38775 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-08T03:50:17,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741844_1020 (size=5672) 2024-11-08T03:50:17,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741844_1020 (size=5672) 2024-11-08T03:50:17,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:17,656 INFO [RS:1;350fccc412b5:44007 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T03:50:17,656 INFO [RS:1;350fccc412b5:44007 {}] 
regionserver.HRegionServer(1031): Exiting; stopping=350fccc412b5,44007,1731037815073; zookeeper connection closed. 2024-11-08T03:50:17,656 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44007-0x1011890dbcf0002, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:17,657 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3449f258 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3449f258 2024-11-08T03:50:17,657 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-08T03:50:18,017 INFO [M:0;350fccc412b5:42065 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ff0f179b49d943028cda22a0eed0ae52 2024-11-08T03:50:18,050 DEBUG [M:0;350fccc412b5:42065 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d82249f3428142079870b63132bd9acb is 749, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731037816675/Put/seqid=0 2024-11-08T03:50:18,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741845_1021 (size=6440) 2024-11-08T03:50:18,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741845_1021 (size=6440) 2024-11-08T03:50:18,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741845_1021 (size=6440) 2024-11-08T03:50:18,057 INFO [M:0;350fccc412b5:42065 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.16 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d82249f3428142079870b63132bd9acb 2024-11-08T03:50:18,078 DEBUG [M:0;350fccc412b5:42065 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fb653fc2e96340d8a08f25c61b3b96b7 is 69, key is 350fccc412b5,37435,1731037815030/rs:state/1731037815605/Put/seqid=0 2024-11-08T03:50:18,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741846_1022 (size=5294) 2024-11-08T03:50:18,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741846_1022 (size=5294) 2024-11-08T03:50:18,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741846_1022 (size=5294) 2024-11-08T03:50:18,086 INFO [M:0;350fccc412b5:42065 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), 
to=hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fb653fc2e96340d8a08f25c61b3b96b7 2024-11-08T03:50:18,092 DEBUG [M:0;350fccc412b5:42065 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ff0f179b49d943028cda22a0eed0ae52 as hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ff0f179b49d943028cda22a0eed0ae52 2024-11-08T03:50:18,099 INFO [M:0;350fccc412b5:42065 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ff0f179b49d943028cda22a0eed0ae52, entries=8, sequenceid=72, filesize=5.5 K 2024-11-08T03:50:18,100 DEBUG [M:0;350fccc412b5:42065 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/d82249f3428142079870b63132bd9acb as hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d82249f3428142079870b63132bd9acb 2024-11-08T03:50:18,106 INFO [M:0;350fccc412b5:42065 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/d82249f3428142079870b63132bd9acb, entries=8, sequenceid=72, filesize=6.3 K 2024-11-08T03:50:18,108 DEBUG [M:0;350fccc412b5:42065 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/fb653fc2e96340d8a08f25c61b3b96b7 as hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fb653fc2e96340d8a08f25c61b3b96b7 2024-11-08T03:50:18,114 INFO [M:0;350fccc412b5:42065 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38775/user/jenkins/test-data/f9bd32ca-37ea-5ce9-8aab-5ac80c1df9b1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/fb653fc2e96340d8a08f25c61b3b96b7, entries=3, sequenceid=72, filesize=5.2 K 2024-11-08T03:50:18,116 INFO [M:0;350fccc412b5:42065 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 526ms, sequenceid=72, compaction requested=false 2024-11-08T03:50:18,117 INFO [M:0;350fccc412b5:42065 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
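The flush commits above follow a two-step pattern: each new store file is first written under a ".tmp" directory ("Flushed memstore data size=... to=.../.tmp/<family>/<file>") and then made visible with a rename ("Committing .../.tmp/<family>/<file> as .../<family>/<file>", followed by "Added ..., entries=N, sequenceid=..., filesize=..."). A minimal sketch of that write-then-rename commit using the Hadoop FileSystem API is shown below; the directory layout and file name are placeholders, and this is not the HRegionFileSystem implementation.

    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Write the new file into .tmp, sync it, then rename it into place so readers
    // only ever see a complete file. Hypothetical paths; illustration only.
    public class TmpThenRenameSketch {
        public static void flushAndCommit(FileSystem fs, Path storeDir, byte[] payload)
                throws Exception {
            Path tmpDir = new Path(storeDir, ".tmp");
            Path tmpFile = new Path(tmpDir, "flush-0001");   // placeholder file name
            Path committed = new Path(storeDir, "flush-0001");

            fs.mkdirs(tmpDir);
            try (FSDataOutputStream out = fs.create(tmpFile, true)) {
                out.write(payload);   // write the whole file under .tmp
                out.hsync();          // make it durable on the datanodes
            }
            // The rename is the commit point; until it succeeds the file is invisible.
            if (!fs.rename(tmpFile, committed)) {
                throw new IllegalStateException("commit rename failed for " + tmpFile);
            }
        }
    }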
2024-11-08T03:50:18,117 DEBUG [M:0;350fccc412b5:42065 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731037817590Disabling compacts and flushes for region at 1731037817590Disabling writes for close at 1731037817590Obtaining lock to block concurrent updates at 1731037817590Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731037817590Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27483, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731037817590Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731037817591 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731037817591Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731037817607 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731037817607Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731037818029 (+422 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731037818049 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731037818049Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731037818063 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731037818078 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731037818078Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@767fbf38: reopening flushed file at 1731037818091 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45d6710a: reopening flushed file at 1731037818099 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@492cf15f: reopening flushed file at 1731037818106 (+7 ms)Finished flush of dataSize ~26.84 KB/27483, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 526ms, sequenceid=72, compaction requested=false at 1731037818116 (+10 ms)Writing region close event to WAL at 1731037818117 (+1 ms)Closed at 1731037818117 2024-11-08T03:50:18,117 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:18,118 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:18,118 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:18,118 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:18,118 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-08T03:50:18,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42013 is added to blk_1073741830_1006 (size=32686) 2024-11-08T03:50:18,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33003 is added to blk_1073741830_1006 (size=32686) 2024-11-08T03:50:18,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43531 is added to blk_1073741830_1006 (size=32686) 2024-11-08T03:50:18,121 INFO [M:0;350fccc412b5:42065 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-08T03:50:18,121 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-08T03:50:18,121 INFO [M:0;350fccc412b5:42065 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.3:42065 2024-11-08T03:50:18,122 INFO [M:0;350fccc412b5:42065 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-08T03:50:18,267 INFO [M:0;350fccc412b5:42065 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-08T03:50:18,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:18,267 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42065-0x1011890dbcf0000, quorum=127.0.0.1:55842, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-08T03:50:18,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2cad0990{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:18,270 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3aa37294{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T03:50:18,270 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T03:50:18,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1de5d3bb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T03:50:18,270 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68281a7c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/hadoop.log.dir/,STOPPED} 2024-11-08T03:50:18,272 WARN [BP-2060482000-172.17.0.3-1731037811879 heartbeating to localhost/127.0.0.1:38775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T03:50:18,272 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T03:50:18,272 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T03:50:18,272 WARN [BP-2060482000-172.17.0.3-1731037811879 heartbeating to localhost/127.0.0.1:38775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2060482000-172.17.0.3-1731037811879 (Datanode Uuid 7888a538-ce46-400c-ac32-cb7cb76b633c) service to localhost/127.0.0.1:38775 2024-11-08T03:50:18,272 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data5/current/BP-2060482000-172.17.0.3-1731037811879 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T03:50:18,273 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data6/current/BP-2060482000-172.17.0.3-1731037811879 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T03:50:18,273 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T03:50:18,275 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f04efdd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:18,275 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1777b071{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T03:50:18,275 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T03:50:18,275 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@cd43b0e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T03:50:18,275 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45f9ed01{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/hadoop.log.dir/,STOPPED} 2024-11-08T03:50:18,276 WARN [BP-2060482000-172.17.0.3-1731037811879 heartbeating to localhost/127.0.0.1:38775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T03:50:18,276 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T03:50:18,277 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T03:50:18,277 WARN [BP-2060482000-172.17.0.3-1731037811879 heartbeating to localhost/127.0.0.1:38775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2060482000-172.17.0.3-1731037811879 (Datanode Uuid 342b4667-e136-4271-9ffa-e18fd6e7295d) service to localhost/127.0.0.1:38775 2024-11-08T03:50:18,277 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data3/current/BP-2060482000-172.17.0.3-1731037811879 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T03:50:18,277 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data4/current/BP-2060482000-172.17.0.3-1731037811879 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T03:50:18,278 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T03:50:18,280 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@8cdf55{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-08T03:50:18,281 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5d6b32e0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T03:50:18,281 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T03:50:18,281 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f39fb56{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T03:50:18,281 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47a1471c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/hadoop.log.dir/,STOPPED} 2024-11-08T03:50:18,282 WARN [BP-2060482000-172.17.0.3-1731037811879 heartbeating to localhost/127.0.0.1:38775 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-08T03:50:18,282 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-08T03:50:18,282 WARN [BP-2060482000-172.17.0.3-1731037811879 heartbeating to localhost/127.0.0.1:38775 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2060482000-172.17.0.3-1731037811879 (Datanode Uuid e849586a-c2bf-4c42-a156-b8e015412504) service to localhost/127.0.0.1:38775 2024-11-08T03:50:18,282 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-08T03:50:18,283 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data1/current/BP-2060482000-172.17.0.3-1731037811879 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T03:50:18,283 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/cluster_b9fa9963-a86e-c430-7be1-97b0a7beffc6/data/data2/current/BP-2060482000-172.17.0.3-1731037811879 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-08T03:50:18,283 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-08T03:50:18,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@574dcc44{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-08T03:50:18,289 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4a66f025{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-08T03:50:18,289 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-08T03:50:18,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c37558{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-08T03:50:18,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b990189{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eaa15c5d-56ed-8e9d-d6e4-2e8e9fd06b37/hadoop.log.dir/,STOPPED} 2024-11-08T03:50:18,295 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-08T03:50:18,320 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-08T03:50:18,328 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=153 (was 94) - Thread LEAK? -, OpenFileDescriptor=518 (was 447) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=108 (was 65) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=12612 (was 12786)
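The closing ResourceChecker entry compares resource counts before and after regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] and flags possible leaks: Thread=153 (was 94), OpenFileDescriptor=518 (was 447), SystemLoadAverage=108 (was 65), AvailableMemoryMB=12612 (was 12786). A rough sketch of that before/after accounting, for the thread count only and using standard JMX beans, is given below; it approximates the idea rather than the ResourceChecker implementation, and runWorkload() is a hypothetical stand-in for the test body.

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadMXBean;

    // Before/after thread accounting in the spirit of the ResourceChecker summary.
    public class ThreadLeakCheckSketch {
        public static void main(String[] args) throws InterruptedException {
            ThreadMXBean threads = ManagementFactory.getThreadMXBean();
            int before = threads.getThreadCount();

            runWorkload(); // stands in for the test body

            int after = threads.getThreadCount();
            System.out.printf("Thread=%d (was %d)%s%n",
                    after, before, after > before ? " - Thread LEAK? -" : "");
        }

        private static void runWorkload() throws InterruptedException {
            Thread t = new Thread(() -> { /* simulated test work */ });
            t.start();
            t.join();
        }
    }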