2024-12-09 18:48:05,863 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-09 18:48:05,874 main DEBUG Took 0.009435 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-09 18:48:05,874 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-09 18:48:05,875 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-09 18:48:05,875 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-09 18:48:05,877 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,885 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-09 18:48:05,902 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,903 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,904 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,904 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,904 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,905 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,905 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,906 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,906 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,906 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,907 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,907 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,908 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,908 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-09 18:48:05,908 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,909 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,909 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,909 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,910 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,910 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,910 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,911 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,911 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,911 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-09 18:48:05,912 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,912 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-09 18:48:05,913 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-09 18:48:05,915 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-09 18:48:05,916 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-09 18:48:05,917 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-09 18:48:05,918 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-09 18:48:05,918 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-09 18:48:05,927 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-09 18:48:05,930 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-09 18:48:05,932 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-09 18:48:05,932 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-09 18:48:05,932 main DEBUG createAppenders(={Console}) 2024-12-09 18:48:05,933 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized 2024-12-09 18:48:05,933 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba 2024-12-09 18:48:05,934 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK. 2024-12-09 18:48:05,934 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-09 18:48:05,934 main DEBUG OutputStream closed 2024-12-09 18:48:05,934 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-09 18:48:05,935 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-09 18:48:05,935 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK 2024-12-09 18:48:05,996 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-09 18:48:05,998 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-09 18:48:05,999 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-09 18:48:06,000 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-09 18:48:06,001 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-09 18:48:06,001 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-09 18:48:06,001 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-09 18:48:06,001 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-09 18:48:06,002 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-09 18:48:06,002 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-09 18:48:06,002 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-09 18:48:06,002 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-09 18:48:06,003 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-09 18:48:06,003 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-09 18:48:06,003 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-09 18:48:06,004 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-09 18:48:06,004 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-09 18:48:06,004 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-09 18:48:06,006 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-09 18:48:06,007 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null 2024-12-09 18:48:06,007 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-09 18:48:06,008 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK. 2024-12-09T18:48:06,021 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-12-09 18:48:06,023 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-09 18:48:06,024 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-12-09T18:48:06,226 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28 2024-12-09T18:48:06,248 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0, deleteOnExit=true 2024-12-09T18:48:06,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/test.cache.data in system properties and HBase conf 2024-12-09T18:48:06,249 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T18:48:06,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.log.dir in system properties and HBase conf 2024-12-09T18:48:06,250 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T18:48:06,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T18:48:06,251 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T18:48:06,335 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-12-09T18:48:06,418 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T18:48:06,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T18:48:06,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T18:48:06,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T18:48:06,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T18:48:06,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T18:48:06,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T18:48:06,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T18:48:06,425 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T18:48:06,425 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T18:48:06,425 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/nfs.dump.dir in system properties and HBase conf 2024-12-09T18:48:06,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/java.io.tmpdir in system properties and HBase conf 2024-12-09T18:48:06,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T18:48:06,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T18:48:06,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T18:48:07,316 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-09T18:48:07,384 INFO [Time-limited test {}] log.Log(170): Logging initialized @2129ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-09T18:48:07,449 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T18:48:07,505 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T18:48:07,522 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T18:48:07,523 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T18:48:07,524 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T18:48:07,535 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T18:48:07,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.log.dir/,AVAILABLE} 2024-12-09T18:48:07,539 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T18:48:07,693 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/java.io.tmpdir/jetty-localhost-39765-hadoop-hdfs-3_4_1-tests_jar-_-any-1974919650973267496/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T18:48:07,701 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:39765} 2024-12-09T18:48:07,702 INFO [Time-limited test {}] server.Server(415): Started @2447ms 2024-12-09T18:48:08,164 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T18:48:08,170 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T18:48:08,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T18:48:08,172 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T18:48:08,172 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T18:48:08,173 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.log.dir/,AVAILABLE} 2024-12-09T18:48:08,174 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T18:48:08,269 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4839957b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/java.io.tmpdir/jetty-localhost-37315-hadoop-hdfs-3_4_1-tests_jar-_-any-13168529372941914637/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:08,270 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:37315} 2024-12-09T18:48:08,270 INFO [Time-limited test {}] server.Server(415): Started @3016ms 2024-12-09T18:48:08,315 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T18:48:08,416 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T18:48:08,423 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T18:48:08,431 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T18:48:08,431 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T18:48:08,431 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T18:48:08,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.log.dir/,AVAILABLE} 2024-12-09T18:48:08,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T18:48:08,528 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c6b8f01{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/java.io.tmpdir/jetty-localhost-38257-hadoop-hdfs-3_4_1-tests_jar-_-any-16217184403472447755/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:08,529 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:38257} 2024-12-09T18:48:08,529 INFO [Time-limited test {}] server.Server(415): Started @3275ms 2024-12-09T18:48:08,531 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T18:48:08,562 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T18:48:08,566 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T18:48:08,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T18:48:08,569 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T18:48:08,569 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T18:48:08,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.log.dir/,AVAILABLE} 2024-12-09T18:48:08,571 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T18:48:08,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2e59159d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/java.io.tmpdir/jetty-localhost-39367-hadoop-hdfs-3_4_1-tests_jar-_-any-15576062153779915164/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:08,664 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:39367} 2024-12-09T18:48:08,665 INFO [Time-limited test {}] server.Server(415): Started @3410ms 2024-12-09T18:48:08,666 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-12-09T18:48:09,460 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data3/current/BP-780308751-172.17.0.2-1733770086886/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:09,460 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data2/current/BP-780308751-172.17.0.2-1733770086886/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:09,460 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data4/current/BP-780308751-172.17.0.2-1733770086886/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:09,460 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data1/current/BP-780308751-172.17.0.2-1733770086886/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:09,475 WARN [Thread-132 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data5/current/BP-780308751-172.17.0.2-1733770086886/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:09,475 WARN [Thread-133 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data6/current/BP-780308751-172.17.0.2-1733770086886/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:09,498 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T18:48:09,498 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-09T18:48:09,499 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T18:48:09,541 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2597e59c5d9af829 with lease ID 0x1705ec55d0f5564a: Processing first storage report for DS-b6ab3318-0f69-4337-a245-b302eeec4024 from datanode DatanodeRegistration(127.0.0.1:39629, datanodeUuid=033ac5c8-e995-42b6-b8f7-41cb62aa8932, infoPort=45707, infoSecurePort=0, ipcPort=40091, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886) 2024-12-09T18:48:09,543 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2597e59c5d9af829 with lease ID 0x1705ec55d0f5564a: from storage DS-b6ab3318-0f69-4337-a245-b302eeec4024 node DatanodeRegistration(127.0.0.1:39629, datanodeUuid=033ac5c8-e995-42b6-b8f7-41cb62aa8932, infoPort=45707, infoSecurePort=0, ipcPort=40091, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T18:48:09,543 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4b39f48e8b371fb with lease ID 0x1705ec55d0f55648: Processing first storage report for DS-8effcba6-72dc-4d14-a9a1-72a95c6dafe4 from datanode DatanodeRegistration(127.0.0.1:36227, datanodeUuid=ad607743-4c54-48b4-8e12-1a0c325ff579, infoPort=33397, infoSecurePort=0, ipcPort=44707, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886) 2024-12-09T18:48:09,543 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4b39f48e8b371fb with lease ID 0x1705ec55d0f55648: from storage DS-8effcba6-72dc-4d14-a9a1-72a95c6dafe4 node DatanodeRegistration(127.0.0.1:36227, datanodeUuid=ad607743-4c54-48b4-8e12-1a0c325ff579, infoPort=33397, infoSecurePort=0, ipcPort=44707, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T18:48:09,544 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4f9c9c1ec010337 with lease ID 0x1705ec55d0f55649: Processing first storage report for DS-9498e315-da14-46ce-98bd-bcdde1c77e84 from datanode DatanodeRegistration(127.0.0.1:37907, datanodeUuid=2af04275-d0fe-4d39-be17-e6822b952cee, infoPort=42745, infoSecurePort=0, ipcPort=33781, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886) 2024-12-09T18:48:09,544 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4f9c9c1ec010337 with lease ID 0x1705ec55d0f55649: from storage DS-9498e315-da14-46ce-98bd-bcdde1c77e84 node DatanodeRegistration(127.0.0.1:37907, datanodeUuid=2af04275-d0fe-4d39-be17-e6822b952cee, infoPort=42745, infoSecurePort=0, ipcPort=33781, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T18:48:09,544 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2597e59c5d9af829 with lease ID 0x1705ec55d0f5564a: Processing first storage report for DS-9c776599-07af-4832-b470-52e7f71c9600 from datanode DatanodeRegistration(127.0.0.1:39629, datanodeUuid=033ac5c8-e995-42b6-b8f7-41cb62aa8932, infoPort=45707, infoSecurePort=0, ipcPort=40091, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886) 2024-12-09T18:48:09,544 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x2597e59c5d9af829 with lease ID 0x1705ec55d0f5564a: from storage DS-9c776599-07af-4832-b470-52e7f71c9600 node DatanodeRegistration(127.0.0.1:39629, datanodeUuid=033ac5c8-e995-42b6-b8f7-41cb62aa8932, infoPort=45707, infoSecurePort=0, ipcPort=40091, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T18:48:09,544 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4b39f48e8b371fb with lease ID 0x1705ec55d0f55648: Processing first storage report for DS-e6a5494e-1df5-4721-995c-9c6130bf1f8b from datanode DatanodeRegistration(127.0.0.1:36227, datanodeUuid=ad607743-4c54-48b4-8e12-1a0c325ff579, infoPort=33397, infoSecurePort=0, ipcPort=44707, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886) 2024-12-09T18:48:09,545 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4b39f48e8b371fb with lease ID 0x1705ec55d0f55648: from storage DS-e6a5494e-1df5-4721-995c-9c6130bf1f8b node DatanodeRegistration(127.0.0.1:36227, datanodeUuid=ad607743-4c54-48b4-8e12-1a0c325ff579, infoPort=33397, infoSecurePort=0, ipcPort=44707, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T18:48:09,545 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xb4f9c9c1ec010337 with lease ID 0x1705ec55d0f55649: Processing first storage report for DS-9021482f-b994-4740-8acd-c37ba10fca74 from datanode DatanodeRegistration(127.0.0.1:37907, datanodeUuid=2af04275-d0fe-4d39-be17-e6822b952cee, infoPort=42745, infoSecurePort=0, ipcPort=33781, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886) 2024-12-09T18:48:09,545 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb4f9c9c1ec010337 with lease ID 0x1705ec55d0f55649: from storage DS-9021482f-b994-4740-8acd-c37ba10fca74 node DatanodeRegistration(127.0.0.1:37907, datanodeUuid=2af04275-d0fe-4d39-be17-e6822b952cee, infoPort=42745, infoSecurePort=0, ipcPort=33781, storageInfo=lv=-57;cid=testClusterID;nsid=1245421103;c=1733770086886), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T18:48:09,563 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28 2024-12-09T18:48:09,626 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable
2024-12-09T18:48:09,672 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=161, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=349, ProcessCount=11, AvailableMemoryMB=3098
2024-12-09T18:48:09,674 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-12-09T18:48:09,684 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-12-09T18:48:09,771 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/zookeeper_0, clientPort=51741, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-12-09T18:48:09,781 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51741
2024-12-09T18:48:09,790 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T18:48:09,792 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-12-09T18:48:09,865 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T18:48:09,865 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-12-09T18:48:09,901 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:51404 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:37907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51404 dst: /127.0.0.1:37907
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-09T18:48:09,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_-9223372036854775792_1002 (size=7)
2024-12-09T18:48:10,319 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-12-09T18:48:10,332 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95 with version=8
2024-12-09T18:48:10,333 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/hbase-staging
2024-12-09T18:48:10,423 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-12-09T18:48:10,648 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a437f8b9ba7d:0 server-side Connection retries=45
2024-12-09T18:48:10,657 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T18:48:10,658 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-12-09T18:48:10,662 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-12-09T18:48:10,662 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-12-09T18:48:10,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-12-09T18:48:10,787 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-12-09T18:48:10,843 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class 
org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-09T18:48:10,851 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-09T18:48:10,855 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T18:48:10,876 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 69411 (auto-detected) 2024-12-09T18:48:10,877 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-09T18:48:10,892 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32905 2024-12-09T18:48:10,910 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32905 connecting to ZooKeeper ensemble=127.0.0.1:51741 2024-12-09T18:48:11,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329050x0, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T18:48:11,019 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32905-0x1000c1f31990000 connected 2024-12-09T18:48:11,085 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:11,087 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:11,099 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:11,103 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95, hbase.cluster.distributed=false 2024-12-09T18:48:11,124 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T18:48:11,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32905 2024-12-09T18:48:11,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32905 2024-12-09T18:48:11,129 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32905 2024-12-09T18:48:11,130 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32905 2024-12-09T18:48:11,130 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32905 2024-12-09T18:48:11,217 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a437f8b9ba7d:0 server-side Connection retries=45 2024-12-09T18:48:11,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:11,219 INFO [Time-limited test {}] 
ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:11,219 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T18:48:11,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:11,219 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T18:48:11,222 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T18:48:11,224 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T18:48:11,225 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35405 2024-12-09T18:48:11,226 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35405 connecting to ZooKeeper ensemble=127.0.0.1:51741 2024-12-09T18:48:11,227 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:11,231 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:11,242 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:354050x0, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T18:48:11,243 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35405-0x1000c1f31990001 connected 2024-12-09T18:48:11,243 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:11,247 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T18:48:11,253 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T18:48:11,256 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T18:48:11,262 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T18:48:11,263 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35405 2024-12-09T18:48:11,264 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, 
port=35405 2024-12-09T18:48:11,264 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35405 2024-12-09T18:48:11,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35405 2024-12-09T18:48:11,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35405 2024-12-09T18:48:11,283 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a437f8b9ba7d:0 server-side Connection retries=45 2024-12-09T18:48:11,283 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:11,283 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:11,284 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T18:48:11,284 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:11,284 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T18:48:11,284 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T18:48:11,284 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T18:48:11,285 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35827 2024-12-09T18:48:11,286 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35827 connecting to ZooKeeper ensemble=127.0.0.1:51741 2024-12-09T18:48:11,287 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:11,290 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:11,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:358270x0, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T18:48:11,301 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35827-0x1000c1f31990002 connected 2024-12-09T18:48:11,301 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:11,302 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-12-09T18:48:11,303 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T18:48:11,304 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T18:48:11,306 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T18:48:11,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35827 2024-12-09T18:48:11,309 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35827 2024-12-09T18:48:11,311 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35827 2024-12-09T18:48:11,311 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35827 2024-12-09T18:48:11,312 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35827 2024-12-09T18:48:11,330 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a437f8b9ba7d:0 server-side Connection retries=45 2024-12-09T18:48:11,330 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:11,331 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:11,331 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T18:48:11,331 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:11,331 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T18:48:11,331 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T18:48:11,332 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T18:48:11,333 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39317 2024-12-09T18:48:11,335 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39317 connecting to ZooKeeper ensemble=127.0.0.1:51741 2024-12-09T18:48:11,336 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:11,339 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:11,350 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:393170x0, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T18:48:11,351 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39317-0x1000c1f31990003 connected 2024-12-09T18:48:11,351 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:11,352 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T18:48:11,353 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T18:48:11,354 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T18:48:11,356 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T18:48:11,359 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39317 2024-12-09T18:48:11,359 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39317 2024-12-09T18:48:11,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39317 2024-12-09T18:48:11,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39317 2024-12-09T18:48:11,361 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39317 2024-12-09T18:48:11,373 DEBUG [M:0;a437f8b9ba7d:32905 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a437f8b9ba7d:32905 2024-12-09T18:48:11,373 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a437f8b9ba7d,32905,1733770090495 2024-12-09T18:48:11,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:11,384 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:11,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-09T18:48:11,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:11,387 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a437f8b9ba7d,32905,1733770090495 2024-12-09T18:48:11,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:11,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T18:48:11,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T18:48:11,416 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T18:48:11,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:11,416 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:11,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:11,417 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T18:48:11,419 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a437f8b9ba7d,32905,1733770090495 from backup master directory 2024-12-09T18:48:11,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a437f8b9ba7d,32905,1733770090495 2024-12-09T18:48:11,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:11,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 
2024-12-09T18:48:11,425 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:11,426 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T18:48:11,426 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a437f8b9ba7d,32905,1733770090495 2024-12-09T18:48:11,426 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:11,429 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-09T18:48:11,430 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-09T18:48:11,485 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/hbase.id] with ID: 56a5f87a-a29b-4a24-91e3-473af4c7a114 2024-12-09T18:48:11,485 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/.tmp/hbase.id 2024-12-09T18:48:11,491 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:11,491 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:11,494 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:59434 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:39629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59434 dst: /127.0.0.1:39629 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:11,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_-9223372036854775776_1004 (size=42) 2024-12-09T18:48:11,500 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T18:48:11,500 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/.tmp/hbase.id]:[hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/hbase.id] 2024-12-09T18:48:11,544 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:11,548 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T18:48:11,565 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-12-09T18:48:11,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:11,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:11,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:11,575 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:11,588 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:11,588 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:11,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:59456 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:39629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59456 dst: /127.0.0.1:39629 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:11,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_-9223372036854775760_1006 (size=196) 2024-12-09T18:48:11,598 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T18:48:11,610 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T18:48:11,612 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T18:48:11,616 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T18:48:11,640 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:11,640 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:11,644 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:51416 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:37907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51416 dst: /127.0.0.1:37907 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:11,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_-9223372036854775744_1008 (size=1189) 2024-12-09T18:48:11,650 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T18:48:11,664 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store 2024-12-09T18:48:11,677 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:11,677 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:11,680 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:59474 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59474 dst: /127.0.0.1:39629 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:11,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_-9223372036854775728_1010 (size=34) 2024-12-09T18:48:11,689 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T18:48:11,692 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-09T18:48:11,695 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:11,696 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T18:48:11,696 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T18:48:11,696 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T18:48:11,697 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-12-09T18:48:11,697 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T18:48:11,697 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T18:48:11,698 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733770091695Disabling compacts and flushes for region at 1733770091695Disabling writes for close at 1733770091697 (+2 ms)Writing region close event to WAL at 1733770091697Closed at 1733770091697 2024-12-09T18:48:11,700 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/.initializing 2024-12-09T18:48:11,701 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/WALs/a437f8b9ba7d,32905,1733770090495 2024-12-09T18:48:11,708 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T18:48:11,721 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a437f8b9ba7d%2C32905%2C1733770090495, suffix=, logDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/WALs/a437f8b9ba7d,32905,1733770090495, archiveDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/oldWALs, maxLogs=10 2024-12-09T18:48:11,754 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/WALs/a437f8b9ba7d,32905,1733770090495/a437f8b9ba7d%2C32905%2C1733770090495.1733770091726, exclude list is [], retry=0 2024-12-09T18:48:11,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:11,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39629,DS-b6ab3318-0f69-4337-a245-b302eeec4024,DISK] 2024-12-09T18:48:11,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37907,DS-9498e315-da14-46ce-98bd-bcdde1c77e84,DISK] 2024-12-09T18:48:11,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36227,DS-8effcba6-72dc-4d14-a9a1-72a95c6dafe4,DISK] 2024-12-09T18:48:11,774 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-12-09T18:48:11,810 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/WALs/a437f8b9ba7d,32905,1733770090495/a437f8b9ba7d%2C32905%2C1733770090495.1733770091726 2024-12-09T18:48:11,811 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:42745:42745),(127.0.0.1/127.0.0.1:45707:45707),(127.0.0.1/127.0.0.1:33397:33397)] 2024-12-09T18:48:11,812 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T18:48:11,812 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:11,815 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,816 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,848 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,870 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major 
period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T18:48:11,872 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:11,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:11,875 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,878 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T18:48:11,878 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:11,879 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T18:48:11,880 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, 
compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T18:48:11,882 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:11,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T18:48:11,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T18:48:11,886 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:11,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T18:48:11,887 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,891 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,892 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,897 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,897 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up 
temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,900 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T18:48:11,903 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:11,909 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T18:48:11,910 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62778668, jitterRate=-0.06452494859695435}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T18:48:11,916 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733770091826Initializing all the Stores at 1733770091828 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770091828Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770091829 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770091829Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770091829Cleaning up temporary data from old regions at 1733770091897 (+68 ms)Region opened successfully at 1733770091916 (+19 ms) 2024-12-09T18:48:11,917 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T18:48:11,947 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@688b096c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a437f8b9ba7d/172.17.0.2:0 2024-12-09T18:48:11,973 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T18:48:11,982 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T18:48:11,982 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T18:48:11,984 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T18:48:11,985 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-12-09T18:48:11,989 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 3 msec 2024-12-09T18:48:11,989 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T18:48:12,010 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-12-09T18:48:12,018 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T18:48:12,050 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T18:48:12,053 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T18:48:12,055 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T18:48:12,066 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T18:48:12,068 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T18:48:12,071 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T18:48:12,075 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T18:48:12,076 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T18:48:12,083 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster 
{}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T18:48:12,102 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T18:48:12,108 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T18:48:12,117 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:12,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:12,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:12,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:12,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,117 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,121 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a437f8b9ba7d,32905,1733770090495, sessionid=0x1000c1f31990000, setting cluster-up flag (Was=false) 2024-12-09T18:48:12,150 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-12-09T18:48:12,150 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,175 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T18:48:12,181 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a437f8b9ba7d,32905,1733770090495 2024-12-09T18:48:12,200 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,200 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,225 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T18:48:12,228 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a437f8b9ba7d,32905,1733770090495 2024-12-09T18:48:12,237 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T18:48:12,264 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(746): ClusterId : 56a5f87a-a29b-4a24-91e3-473af4c7a114 2024-12-09T18:48:12,264 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(746): ClusterId : 56a5f87a-a29b-4a24-91e3-473af4c7a114 2024-12-09T18:48:12,264 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(746): ClusterId : 56a5f87a-a29b-4a24-91e3-473af4c7a114 2024-12-09T18:48:12,267 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T18:48:12,267 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T18:48:12,267 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T18:48:12,277 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T18:48:12,277 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.RegionServerProcedureManagerHost(45): Procedure 
flush-table-proc initialized 2024-12-09T18:48:12,277 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T18:48:12,277 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T18:48:12,277 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T18:48:12,277 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T18:48:12,293 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T18:48:12,293 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T18:48:12,293 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T18:48:12,293 DEBUG [RS:1;a437f8b9ba7d:35827 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@105c3aed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a437f8b9ba7d/172.17.0.2:0 2024-12-09T18:48:12,293 DEBUG [RS:0;a437f8b9ba7d:35405 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bc3072d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a437f8b9ba7d/172.17.0.2:0 2024-12-09T18:48:12,299 DEBUG [RS:2;a437f8b9ba7d:39317 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78438a1e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a437f8b9ba7d/172.17.0.2:0 2024-12-09T18:48:12,307 DEBUG [RS:1;a437f8b9ba7d:35827 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;a437f8b9ba7d:35827 2024-12-09T18:48:12,311 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T18:48:12,311 DEBUG [RS:2;a437f8b9ba7d:39317 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;a437f8b9ba7d:39317 2024-12-09T18:48:12,311 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T18:48:12,311 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T18:48:12,311 DEBUG [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T18:48:12,311 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T18:48:12,311 DEBUG [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T18:48:12,312 DEBUG [RS:0;a437f8b9ba7d:35405 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a437f8b9ba7d:35405 2024-12-09T18:48:12,312 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T18:48:12,313 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T18:48:12,313 DEBUG [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T18:48:12,314 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T18:48:12,314 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(2659): reportForDuty to master=a437f8b9ba7d,32905,1733770090495 with port=35827, startcode=1733770091282 2024-12-09T18:48:12,314 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(2659): reportForDuty to master=a437f8b9ba7d,32905,1733770090495 with port=35405, startcode=1733770091187 2024-12-09T18:48:12,314 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(2659): reportForDuty to master=a437f8b9ba7d,32905,1733770090495 with port=39317, startcode=1733770091329 2024-12-09T18:48:12,323 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T18:48:12,327 DEBUG [RS:1;a437f8b9ba7d:35827 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T18:48:12,327 DEBUG [RS:2;a437f8b9ba7d:39317 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T18:48:12,327 DEBUG [RS:0;a437f8b9ba7d:35405 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T18:48:12,330 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-09T18:48:12,336 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a437f8b9ba7d,32905,1733770090495 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T18:48:12,343 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a437f8b9ba7d:0, corePoolSize=5, maxPoolSize=5 2024-12-09T18:48:12,343 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a437f8b9ba7d:0, corePoolSize=5, maxPoolSize=5 2024-12-09T18:48:12,344 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a437f8b9ba7d:0, corePoolSize=5, maxPoolSize=5 2024-12-09T18:48:12,344 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a437f8b9ba7d:0, corePoolSize=5, maxPoolSize=5 2024-12-09T18:48:12,344 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a437f8b9ba7d:0, corePoolSize=10, maxPoolSize=10 2024-12-09T18:48:12,344 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,344 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a437f8b9ba7d:0, corePoolSize=2, maxPoolSize=2 2024-12-09T18:48:12,344 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,357 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T18:48:12,358 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T18:48:12,361 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733770122360 2024-12-09T18:48:12,362 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42325, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T18:48:12,362 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44711, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T18:48:12,362 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45531, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T18:48:12,363 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner 
Cleaner pool size is 1 2024-12-09T18:48:12,364 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T18:48:12,365 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:12,367 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T18:48:12,367 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T18:48:12,367 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T18:48:12,368 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T18:48:12,369 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32905 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-09T18:48:12,368 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
2024-12-09T18:48:12,371 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T18:48:12,365 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T18:48:12,372 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T18:48:12,372 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T18:48:12,374 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T18:48:12,374 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T18:48:12,375 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32905 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-09T18:48:12,375 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32905 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-12-09T18:48:12,377 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.large.0-1733770092376,5,FailOnTimeoutGroup] 2024-12-09T18:48:12,378 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.small.0-1733770092378,5,FailOnTimeoutGroup] 2024-12-09T18:48:12,378 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,378 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T18:48:12,379 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:12,380 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:12,380 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 
2024-12-09T18:48:12,381 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,388 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:51440 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:37907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51440 dst: /127.0.0.1:37907 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:12,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_-9223372036854775712_1013 (size=1321) 2024-12-09T18:48:12,396 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T18:48:12,397 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T18:48:12,397 DEBUG [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-09T18:48:12,397 DEBUG [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-09T18:48:12,397 DEBUG [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-12-09T18:48:12,397 WARN [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-09T18:48:12,397 WARN [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-09T18:48:12,397 WARN [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-12-09T18:48:12,397 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95 2024-12-09T18:48:12,405 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:12,405 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:12,409 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:51448 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:37907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51448 dst: /127.0.0.1:37907 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:12,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_-9223372036854775696_1015 (size=32) 2024-12-09T18:48:12,414 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T18:48:12,416 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:12,418 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T18:48:12,421 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T18:48:12,421 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:12,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:12,422 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T18:48:12,425 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered 
window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T18:48:12,425 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:12,426 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:12,426 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T18:48:12,428 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T18:48:12,428 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:12,429 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:12,429 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T18:48:12,432 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T18:48:12,432 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:12,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:12,433 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T18:48:12,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740 2024-12-09T18:48:12,435 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740 2024-12-09T18:48:12,438 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T18:48:12,438 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T18:48:12,439 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T18:48:12,442 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T18:48:12,448 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T18:48:12,449 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65425359, jitterRate=-0.0250861793756485}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T18:48:12,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733770092416Initializing all the Stores at 1733770092418 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770092418Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770092418Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770092418Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770092418Cleaning up temporary data from old regions at 1733770092438 (+20 ms)Region opened successfully at 1733770092451 (+13 ms) 2024-12-09T18:48:12,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T18:48:12,451 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T18:48:12,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T18:48:12,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T18:48:12,451 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T18:48:12,453 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T18:48:12,453 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733770092451Disabling compacts and flushes for region at 1733770092451Disabling writes for close at 1733770092451Writing region close event to WAL at 1733770092452 (+1 ms)Closed at 1733770092452 2024-12-09T18:48:12,456 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T18:48:12,456 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T18:48:12,461 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T18:48:12,469 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T18:48:12,472 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T18:48:12,499 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(2659): reportForDuty to master=a437f8b9ba7d,32905,1733770090495 with port=35405, startcode=1733770091187 2024-12-09T18:48:12,499 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(2659): reportForDuty to master=a437f8b9ba7d,32905,1733770090495 with port=35827, startcode=1733770091282 2024-12-09T18:48:12,499 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(2659): reportForDuty to master=a437f8b9ba7d,32905,1733770090495 with port=39317, startcode=1733770091329 2024-12-09T18:48:12,501 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32905 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a437f8b9ba7d,35405,1733770091187 2024-12-09T18:48:12,503 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32905 {}] master.ServerManager(517): Registering regionserver=a437f8b9ba7d,35405,1733770091187 
2024-12-09T18:48:12,510 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32905 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a437f8b9ba7d,35827,1733770091282 2024-12-09T18:48:12,510 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32905 {}] master.ServerManager(517): Registering regionserver=a437f8b9ba7d,35827,1733770091282 2024-12-09T18:48:12,511 DEBUG [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95 2024-12-09T18:48:12,511 DEBUG [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42057 2024-12-09T18:48:12,511 DEBUG [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T18:48:12,513 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32905 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a437f8b9ba7d,39317,1733770091329 2024-12-09T18:48:12,513 DEBUG [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95 2024-12-09T18:48:12,513 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32905 {}] master.ServerManager(517): Registering regionserver=a437f8b9ba7d,39317,1733770091329 2024-12-09T18:48:12,513 DEBUG [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42057 2024-12-09T18:48:12,513 DEBUG [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T18:48:12,516 DEBUG [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95 2024-12-09T18:48:12,516 DEBUG [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42057 2024-12-09T18:48:12,516 DEBUG [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T18:48:12,524 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T18:48:12,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_-9223372036854775789_1002 (size=7) 2024-12-09T18:48:12,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775788_1002 (size=7) 2024-12-09T18:48:12,555 DEBUG [RS:0;a437f8b9ba7d:35405 {}] zookeeper.ZKUtil(111): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a437f8b9ba7d,35405,1733770091187 2024-12-09T18:48:12,556 WARN [RS:0;a437f8b9ba7d:35405 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T18:48:12,556 DEBUG [RS:1;a437f8b9ba7d:35827 {}] zookeeper.ZKUtil(111): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a437f8b9ba7d,35827,1733770091282 2024-12-09T18:48:12,556 INFO [RS:0;a437f8b9ba7d:35405 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T18:48:12,556 DEBUG [RS:2;a437f8b9ba7d:39317 {}] zookeeper.ZKUtil(111): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a437f8b9ba7d,39317,1733770091329 2024-12-09T18:48:12,556 WARN [RS:1;a437f8b9ba7d:35827 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T18:48:12,556 WARN [RS:2;a437f8b9ba7d:39317 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T18:48:12,556 INFO [RS:2;a437f8b9ba7d:39317 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T18:48:12,556 INFO [RS:1;a437f8b9ba7d:35827 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T18:48:12,556 DEBUG [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,35405,1733770091187 2024-12-09T18:48:12,556 DEBUG [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,35827,1733770091282 2024-12-09T18:48:12,557 DEBUG [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,39317,1733770091329 2024-12-09T18:48:12,558 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a437f8b9ba7d,35827,1733770091282] 2024-12-09T18:48:12,558 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a437f8b9ba7d,35405,1733770091187] 2024-12-09T18:48:12,558 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a437f8b9ba7d,39317,1733770091329] 2024-12-09T18:48:12,582 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T18:48:12,582 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T18:48:12,582 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T18:48:12,594 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T18:48:12,594 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T18:48:12,594 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T18:48:12,600 INFO 
[RS:1;a437f8b9ba7d:35827 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T18:48:12,600 INFO [RS:0;a437f8b9ba7d:35405 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T18:48:12,600 INFO [RS:2;a437f8b9ba7d:39317 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T18:48:12,600 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,600 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,600 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,601 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T18:48:12,601 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T18:48:12,601 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T18:48:12,607 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T18:48:12,607 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T18:48:12,607 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T18:48:12,608 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,608 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,608 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T18:48:12,609 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=2, maxPoolSize=2 2024-12-09T18:48:12,609 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=2, maxPoolSize=2 
2024-12-09T18:48:12,609 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=2, maxPoolSize=2 2024-12-09T18:48:12,609 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,609 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:12,610 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:0;a437f8b9ba7d:35405 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:12,610 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): 
Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:12,610 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:12,610 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:12,610 DEBUG [RS:2;a437f8b9ba7d:39317 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:12,610 DEBUG [RS:1;a437f8b9ba7d:35827 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:12,612 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
2024-12-09T18:48:12,612 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,39317,1733770091329-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,35827,1733770091282-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T18:48:12,612 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,35405,1733770091187-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T18:48:12,623 WARN [a437f8b9ba7d:32905 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T18:48:12,634 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T18:48:12,636 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,39317,1733770091329-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,636 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,636 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.Replication(171): a437f8b9ba7d,39317,1733770091329 started 2024-12-09T18:48:12,641 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T18:48:12,641 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T18:48:12,641 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,35405,1733770091187-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,641 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,35827,1733770091282-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,641 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,641 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.Replication(171): a437f8b9ba7d,35405,1733770091187 started 2024-12-09T18:48:12,642 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,642 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.Replication(171): a437f8b9ba7d,35827,1733770091282 started 2024-12-09T18:48:12,659 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T18:48:12,659 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:12,660 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(1482): Serving as a437f8b9ba7d,35405,1733770091187, RpcServer on a437f8b9ba7d/172.17.0.2:35405, sessionid=0x1000c1f31990001 2024-12-09T18:48:12,660 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(1482): Serving as a437f8b9ba7d,39317,1733770091329, RpcServer on a437f8b9ba7d/172.17.0.2:39317, sessionid=0x1000c1f31990003 2024-12-09T18:48:12,660 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T18:48:12,660 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T18:48:12,661 DEBUG [RS:0;a437f8b9ba7d:35405 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a437f8b9ba7d,35405,1733770091187 2024-12-09T18:48:12,661 DEBUG [RS:2;a437f8b9ba7d:39317 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a437f8b9ba7d,39317,1733770091329 2024-12-09T18:48:12,661 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,35405,1733770091187' 2024-12-09T18:48:12,661 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,39317,1733770091329' 2024-12-09T18:48:12,661 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T18:48:12,661 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T18:48:12,662 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T18:48:12,662 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T18:48:12,662 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
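Annotation (not part of the captured log): the flush-table-proc and online-snapshot procedure members above register under the /hbase/flush-table-proc/{acquired,abort} znodes on the quorum at 127.0.0.1:51741. A minimal, hypothetical sketch of inspecting those znodes with the plain ZooKeeper client follows; it is illustrative only and not code from this test run.

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListProcedureZNodes {
        public static void main(String[] args) throws Exception {
            // Same quorum the mini-cluster's ZKWatcher entries report.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:51741", 30000, event -> { });
            try {
                // Members that acquired a flush-table procedure appear as children here.
                List<String> acquired = zk.getChildren("/hbase/flush-table-proc/acquired", false);
                System.out.println("acquired: " + acquired);
                // An empty abort node means no flush-table procedure was aborted.
                List<String> aborted = zk.getChildren("/hbase/flush-table-proc/abort", false);
                System.out.println("abort: " + aborted);
            } finally {
                zk.close();
            }
        }
    }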
2024-12-09T18:48:12,662 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T18:48:12,662 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T18:48:12,662 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T18:48:12,662 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(1482): Serving as a437f8b9ba7d,35827,1733770091282, RpcServer on a437f8b9ba7d/172.17.0.2:35827, sessionid=0x1000c1f31990002 2024-12-09T18:48:12,662 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T18:48:12,663 DEBUG [RS:0;a437f8b9ba7d:35405 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a437f8b9ba7d,35405,1733770091187 2024-12-09T18:48:12,663 DEBUG [RS:2;a437f8b9ba7d:39317 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a437f8b9ba7d,39317,1733770091329 2024-12-09T18:48:12,663 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,35405,1733770091187' 2024-12-09T18:48:12,663 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,39317,1733770091329' 2024-12-09T18:48:12,663 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T18:48:12,663 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T18:48:12,663 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T18:48:12,663 DEBUG [RS:1;a437f8b9ba7d:35827 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a437f8b9ba7d,35827,1733770091282 2024-12-09T18:48:12,663 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,35827,1733770091282' 2024-12-09T18:48:12,663 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T18:48:12,663 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T18:48:12,663 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T18:48:12,663 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T18:48:12,664 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T18:48:12,664 DEBUG [RS:2;a437f8b9ba7d:39317 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T18:48:12,664 DEBUG [RS:0;a437f8b9ba7d:35405 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T18:48:12,664 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T18:48:12,664 DEBUG [RS:1;a437f8b9ba7d:35827 {}] 
snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a437f8b9ba7d,35827,1733770091282 2024-12-09T18:48:12,664 INFO [RS:2;a437f8b9ba7d:39317 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T18:48:12,664 INFO [RS:0;a437f8b9ba7d:35405 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T18:48:12,664 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,35827,1733770091282' 2024-12-09T18:48:12,664 INFO [RS:2;a437f8b9ba7d:39317 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T18:48:12,664 INFO [RS:0;a437f8b9ba7d:35405 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T18:48:12,664 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T18:48:12,665 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T18:48:12,665 DEBUG [RS:1;a437f8b9ba7d:35827 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T18:48:12,665 INFO [RS:1;a437f8b9ba7d:35827 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T18:48:12,665 INFO [RS:1;a437f8b9ba7d:35827 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T18:48:12,772 INFO [RS:2;a437f8b9ba7d:39317 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T18:48:12,773 INFO [RS:1;a437f8b9ba7d:35827 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T18:48:12,773 INFO [RS:0;a437f8b9ba7d:35405 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-09T18:48:12,778 INFO [RS:1;a437f8b9ba7d:35827 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a437f8b9ba7d%2C35827%2C1733770091282, suffix=, logDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,35827,1733770091282, archiveDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/oldWALs, maxLogs=32 2024-12-09T18:48:12,778 INFO [RS:0;a437f8b9ba7d:35405 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a437f8b9ba7d%2C35405%2C1733770091187, suffix=, logDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,35405,1733770091187, archiveDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/oldWALs, maxLogs=32 2024-12-09T18:48:12,778 INFO [RS:2;a437f8b9ba7d:39317 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a437f8b9ba7d%2C39317%2C1733770091329, suffix=, logDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,39317,1733770091329, archiveDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/oldWALs, maxLogs=32 2024-12-09T18:48:12,792 DEBUG [RS:1;a437f8b9ba7d:35827 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for 
/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,35827,1733770091282/a437f8b9ba7d%2C35827%2C1733770091282.1733770092781, exclude list is [], retry=0 2024-12-09T18:48:12,792 DEBUG [RS:2;a437f8b9ba7d:39317 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,39317,1733770091329/a437f8b9ba7d%2C39317%2C1733770091329.1733770092781, exclude list is [], retry=0 2024-12-09T18:48:12,792 DEBUG [RS:0;a437f8b9ba7d:35405 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,35405,1733770091187/a437f8b9ba7d%2C35405%2C1733770091187.1733770092781, exclude list is [], retry=0 2024-12-09T18:48:12,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36227,DS-8effcba6-72dc-4d14-a9a1-72a95c6dafe4,DISK] 2024-12-09T18:48:12,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39629,DS-b6ab3318-0f69-4337-a245-b302eeec4024,DISK] 2024-12-09T18:48:12,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36227,DS-8effcba6-72dc-4d14-a9a1-72a95c6dafe4,DISK] 2024-12-09T18:48:12,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37907,DS-9498e315-da14-46ce-98bd-bcdde1c77e84,DISK] 2024-12-09T18:48:12,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39629,DS-b6ab3318-0f69-4337-a245-b302eeec4024,DISK] 2024-12-09T18:48:12,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37907,DS-9498e315-da14-46ce-98bd-bcdde1c77e84,DISK] 2024-12-09T18:48:12,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37907,DS-9498e315-da14-46ce-98bd-bcdde1c77e84,DISK] 2024-12-09T18:48:12,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39629,DS-b6ab3318-0f69-4337-a245-b302eeec4024,DISK] 2024-12-09T18:48:12,824 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL 
client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36227,DS-8effcba6-72dc-4d14-a9a1-72a95c6dafe4,DISK] 2024-12-09T18:48:12,832 INFO [RS:2;a437f8b9ba7d:39317 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,39317,1733770091329/a437f8b9ba7d%2C39317%2C1733770091329.1733770092781 2024-12-09T18:48:12,832 INFO [RS:1;a437f8b9ba7d:35827 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,35827,1733770091282/a437f8b9ba7d%2C35827%2C1733770091282.1733770092781 2024-12-09T18:48:12,832 DEBUG [RS:2;a437f8b9ba7d:39317 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33397:33397),(127.0.0.1/127.0.0.1:45707:45707),(127.0.0.1/127.0.0.1:42745:42745)] 2024-12-09T18:48:12,832 DEBUG [RS:1;a437f8b9ba7d:35827 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45707:45707),(127.0.0.1/127.0.0.1:42745:42745),(127.0.0.1/127.0.0.1:33397:33397)] 2024-12-09T18:48:12,833 INFO [RS:0;a437f8b9ba7d:35405 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,35405,1733770091187/a437f8b9ba7d%2C35405%2C1733770091187.1733770092781 2024-12-09T18:48:12,834 DEBUG [RS:0;a437f8b9ba7d:35405 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33397:33397),(127.0.0.1/127.0.0.1:42745:42745),(127.0.0.1/127.0.0.1:45707:45707)] 2024-12-09T18:48:12,875 DEBUG [a437f8b9ba7d:32905 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T18:48:12,881 DEBUG [a437f8b9ba7d:32905 {}] balancer.BalancerClusterState(204): Hosts are {a437f8b9ba7d=0} racks are {/default-rack=0} 2024-12-09T18:48:12,888 DEBUG [a437f8b9ba7d:32905 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T18:48:12,888 DEBUG [a437f8b9ba7d:32905 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T18:48:12,888 DEBUG [a437f8b9ba7d:32905 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T18:48:12,888 DEBUG [a437f8b9ba7d:32905 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T18:48:12,888 DEBUG [a437f8b9ba7d:32905 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T18:48:12,888 DEBUG [a437f8b9ba7d:32905 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T18:48:12,888 INFO [a437f8b9ba7d:32905 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T18:48:12,888 INFO [a437f8b9ba7d:32905 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T18:48:12,888 INFO [a437f8b9ba7d:32905 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T18:48:12,888 DEBUG [a437f8b9ba7d:32905 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T18:48:12,894 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a437f8b9ba7d,35405,1733770091187 2024-12-09T18:48:12,900 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a437f8b9ba7d,35405,1733770091187, state=OPENING 2024-12-09T18:48:12,917 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create 
it 2024-12-09T18:48:12,991 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:12,993 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:12,993 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:12,993 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:12,993 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:12,995 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T18:48:12,998 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=a437f8b9ba7d,35405,1733770091187}] 2024-12-09T18:48:13,176 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T18:48:13,178 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53827, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T18:48:13,189 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T18:48:13,189 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-09T18:48:13,190 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-09T18:48:13,193 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a437f8b9ba7d%2C35405%2C1733770091187.meta, suffix=.meta, logDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,35405,1733770091187, 
archiveDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/oldWALs, maxLogs=32 2024-12-09T18:48:13,207 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,35405,1733770091187/a437f8b9ba7d%2C35405%2C1733770091187.meta.1733770093195.meta, exclude list is [], retry=0 2024-12-09T18:48:13,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:36227,DS-8effcba6-72dc-4d14-a9a1-72a95c6dafe4,DISK] 2024-12-09T18:48:13,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37907,DS-9498e315-da14-46ce-98bd-bcdde1c77e84,DISK] 2024-12-09T18:48:13,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39629,DS-b6ab3318-0f69-4337-a245-b302eeec4024,DISK] 2024-12-09T18:48:13,214 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/WALs/a437f8b9ba7d,35405,1733770091187/a437f8b9ba7d%2C35405%2C1733770091187.meta.1733770093195.meta 2024-12-09T18:48:13,215 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33397:33397),(127.0.0.1/127.0.0.1:42745:42745),(127.0.0.1/127.0.0.1:45707:45707)] 2024-12-09T18:48:13,215 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T18:48:13,217 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T18:48:13,219 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T18:48:13,223 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-09T18:48:13,226 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T18:48:13,227 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:13,227 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T18:48:13,227 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T18:48:13,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T18:48:13,231 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T18:48:13,232 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:13,232 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:13,233 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T18:48:13,234 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T18:48:13,234 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:13,235 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:13,235 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T18:48:13,237 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T18:48:13,237 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:13,237 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:13,238 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T18:48:13,239 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T18:48:13,239 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:13,240 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
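Annotation (not part of the captured log): the CompactionConfiguration(183) entries above report minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, and a 604800000 ms major period with 0.5 jitter. A minimal sketch of setting equivalent values programmatically is shown below; the property names are the commonly documented HBase keys and are an assumption, not something stated in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
        public static Configuration build() {
            Configuration conf = HBaseConfiguration.create();
            // Values mirror the CompactionConfiguration entries logged above.
            conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
            conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
            conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
            conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // compaction ratio
            conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
            conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period (7 days)
            conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter
            return conf;
        }
    }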
2024-12-09T18:48:13,240 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T18:48:13,241 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740 2024-12-09T18:48:13,243 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740 2024-12-09T18:48:13,245 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T18:48:13,245 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T18:48:13,246 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T18:48:13,248 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T18:48:13,249 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58788987, jitterRate=-0.12397582828998566}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T18:48:13,250 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T18:48:13,251 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733770093227Writing region info on filesystem at 1733770093228 (+1 ms)Initializing all the Stores at 1733770093229 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770093229Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770093230 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770093230Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770093230Cleaning up temporary data from old regions at 1733770093245 (+15 ms)Running coprocessor post-open hooks at 1733770093250 (+5 ms)Region opened successfully at 1733770093251 (+1 ms) 2024-12-09T18:48:13,276 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733770093168 2024-12-09T18:48:13,286 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T18:48:13,287 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T18:48:13,289 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a437f8b9ba7d,35405,1733770091187 2024-12-09T18:48:13,291 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a437f8b9ba7d,35405,1733770091187, state=OPEN 2024-12-09T18:48:13,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T18:48:13,300 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T18:48:13,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T18:48:13,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T18:48:13,300 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:13,300 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:13,300 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:13,300 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:13,301 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, 
state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a437f8b9ba7d,35405,1733770091187 2024-12-09T18:48:13,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T18:48:13,306 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a437f8b9ba7d,35405,1733770091187 in 304 msec 2024-12-09T18:48:13,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T18:48:13,314 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 847 msec 2024-12-09T18:48:13,317 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T18:48:13,317 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T18:48:13,333 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T18:48:13,334 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a437f8b9ba7d,35405,1733770091187, seqNum=-1] 2024-12-09T18:48:13,351 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T18:48:13,354 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53173, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T18:48:13,373 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.1060 sec 2024-12-09T18:48:13,373 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733770093373, completionTime=-1 2024-12-09T18:48:13,376 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T18:48:13,377 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-09T18:48:13,400 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T18:48:13,400 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733770153400 2024-12-09T18:48:13,401 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733770213400 2024-12-09T18:48:13,401 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 23 msec 2024-12-09T18:48:13,402 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T18:48:13,408 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,32905,1733770090495-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:13,408 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,32905,1733770090495-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:13,408 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,32905,1733770090495-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:13,410 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a437f8b9ba7d:32905, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:13,410 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:13,411 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:13,417 DEBUG [master/a437f8b9ba7d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T18:48:13,438 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.012sec 2024-12-09T18:48:13,439 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T18:48:13,441 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T18:48:13,441 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T18:48:13,442 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-09T18:48:13,442 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T18:48:13,443 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,32905,1733770090495-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T18:48:13,443 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,32905,1733770090495-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T18:48:13,448 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T18:48:13,449 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T18:48:13,449 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,32905,1733770090495-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:13,488 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c2b8794, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T18:48:13,492 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-09T18:48:13,493 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-09T18:48:13,496 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a437f8b9ba7d,32905,-1 for getting cluster id 2024-12-09T18:48:13,498 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T18:48:13,507 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '56a5f87a-a29b-4a24-91e3-473af4c7a114' 2024-12-09T18:48:13,510 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T18:48:13,510 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "56a5f87a-a29b-4a24-91e3-473af4c7a114" 2024-12-09T18:48:13,512 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50300aa9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T18:48:13,512 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a437f8b9ba7d,32905,-1] 2024-12-09T18:48:13,514 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T18:48:13,516 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:13,517 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52958, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 
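Annotation (not part of the captured log): the ClusterIdFetcher and ConnectionRegistryRpcStubHolder entries above show a client bootstrapping against the master at a437f8b9ba7d,32905, fetching the cluster id, and then locating hbase:meta. A minimal, hypothetical client-side sketch of the same bootstrap via the public API follows; the quorum address is taken from the ZKWatcher entries earlier in this run, everything else is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientBootstrap {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Same ZooKeeper quorum the mini-cluster registered itself with.
            conf.set("hbase.zookeeper.quorum", "127.0.0.1");
            conf.set("hbase.zookeeper.property.clientPort", "51741");
            // ConnectionFactory performs the cluster-id / meta-location lookup seen in the log.
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
            }
        }
    }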
2024-12-09T18:48:13,520 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5260075b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T18:48:13,521 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T18:48:13,527 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a437f8b9ba7d,35405,1733770091187, seqNum=-1] 2024-12-09T18:48:13,527 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T18:48:13,529 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35130, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T18:48:13,550 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a437f8b9ba7d,32905,1733770090495 2024-12-09T18:48:13,555 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T18:48:13,559 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is a437f8b9ba7d,32905,1733770090495 2024-12-09T18:48:13,561 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@340addf1 2024-12-09T18:48:13,562 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T18:48:13,564 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52964, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T18:48:13,568 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T18:48:13,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T18:48:13,581 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T18:48:13,583 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T18:48:13,583 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:13,586 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T18:48:13,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T18:48:13,594 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:13,594 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:13,597 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:59530 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:39629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59530 dst: /127.0.0.1:39629 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:13,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_-9223372036854775680_1021 (size=392) 2024-12-09T18:48:13,603 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
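Annotation (not part of the captured log): the DFSStripedOutputStream warnings above occur because the RS-3-2-1024k policy needs 3 data + 2 parity block placements, while this mini-cluster runs only 3 datanodes, so the parity blocks cannot be allocated (hence the suggestion to run 'hdfs ec -verifyClusterSetup'). As a hedged sketch, an equivalent check can be approximated from Java by comparing a path's EC policy against the live datanode count; the namenode URI matches the WAL paths above, and the directory chosen here is illustrative.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;

    public class EcCapacityCheck {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            DistributedFileSystem dfs = (DistributedFileSystem)
                FileSystem.get(URI.create("hdfs://localhost:42057"), conf);
            Path dir = new Path("/user/jenkins/test-data");
            // Policy effective on the directory (null means plain replication).
            ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
            DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
            if (policy != null) {
                int needed = policy.getNumDataUnits() + policy.getNumParityUnits();
                System.out.printf("policy %s needs %d datanodes, cluster has %d live%n",
                    policy.getName(), needed, live.length);
            } else {
                System.out.println("no erasure coding policy set on " + dir);
            }
        }
    }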
2024-12-09T18:48:13,606 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => b6c9278ed73a07c0d9040b9b77c7d2a9, NAME => 'TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95 2024-12-09T18:48:13,611 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:13,612 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:13,617 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:59550 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:39629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59550 dst: /127.0.0.1:39629 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:13,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_-9223372036854775664_1023 (size=51) 2024-12-09T18:48:13,622 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T18:48:13,623 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:13,623 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing b6c9278ed73a07c0d9040b9b77c7d2a9, disabling compactions & flushes 2024-12-09T18:48:13,623 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:13,623 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:13,623 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. after waiting 0 ms 2024-12-09T18:48:13,623 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:13,623 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:13,623 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for b6c9278ed73a07c0d9040b9b77c7d2a9: Waiting for close lock at 1733770093623Disabling compacts and flushes for region at 1733770093623Disabling writes for close at 1733770093623Writing region close event to WAL at 1733770093623Closed at 1733770093623 2024-12-09T18:48:13,625 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T18:48:13,629 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733770093625"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733770093625"}]},"ts":"1733770093625"} 2024-12-09T18:48:13,634 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-12-09T18:48:13,636 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T18:48:13,638 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733770093636"}]},"ts":"1733770093636"} 2024-12-09T18:48:13,643 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T18:48:13,644 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {a437f8b9ba7d=0} racks are {/default-rack=0} 2024-12-09T18:48:13,645 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T18:48:13,645 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T18:48:13,645 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T18:48:13,645 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T18:48:13,645 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T18:48:13,645 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T18:48:13,645 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T18:48:13,645 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T18:48:13,645 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T18:48:13,645 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T18:48:13,647 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b6c9278ed73a07c0d9040b9b77c7d2a9, ASSIGN}] 2024-12-09T18:48:13,649 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b6c9278ed73a07c0d9040b9b77c7d2a9, ASSIGN 2024-12-09T18:48:13,651 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b6c9278ed73a07c0d9040b9b77c7d2a9, ASSIGN; state=OFFLINE, location=a437f8b9ba7d,39317,1733770091329; forceNewPlan=false, retain=false 2024-12-09T18:48:13,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T18:48:13,804 INFO [a437f8b9ba7d:32905 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
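[editor's sketch] Once the ASSIGN subprocedure above completes, a client can resolve the region location itself. A minimal sketch, assuming an already open Connection named conn, of looking up where the single region of TestHBaseWalOnEC landed; the later "fetched location ... hostname=a437f8b9ba7d,39317" line in this log is the cached result of the same kind of lookup.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public final class WhereIsMyRegion {
  // Prints the server hosting the row key used later in this test ("row").
  static void printLocation(Connection conn) throws Exception {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf("TestHBaseWalOnEC"))) {
      HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes("row"));
      System.out.println("region " + loc.getRegion().getEncodedName()
          + " is open on " + loc.getServerName());
    }
  }
}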
2024-12-09T18:48:13,806 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b6c9278ed73a07c0d9040b9b77c7d2a9, regionState=OPENING, regionLocation=a437f8b9ba7d,39317,1733770091329 2024-12-09T18:48:13,810 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b6c9278ed73a07c0d9040b9b77c7d2a9, ASSIGN because future has completed 2024-12-09T18:48:13,811 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b6c9278ed73a07c0d9040b9b77c7d2a9, server=a437f8b9ba7d,39317,1733770091329}] 2024-12-09T18:48:13,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T18:48:13,966 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T18:48:13,968 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48207, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T18:48:13,976 INFO [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:13,976 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => b6c9278ed73a07c0d9040b9b77c7d2a9, NAME => 'TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9.', STARTKEY => '', ENDKEY => ''} 2024-12-09T18:48:13,977 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:13,977 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:13,977 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:13,977 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:13,979 INFO [StoreOpener-b6c9278ed73a07c0d9040b9b77c7d2a9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:13,981 INFO [StoreOpener-b6c9278ed73a07c0d9040b9b77c7d2a9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b6c9278ed73a07c0d9040b9b77c7d2a9 columnFamilyName cf 2024-12-09T18:48:13,981 DEBUG [StoreOpener-b6c9278ed73a07c0d9040b9b77c7d2a9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:13,982 INFO [StoreOpener-b6c9278ed73a07c0d9040b9b77c7d2a9-1 {}] regionserver.HStore(327): Store=b6c9278ed73a07c0d9040b9b77c7d2a9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T18:48:13,982 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:13,983 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/default/TestHBaseWalOnEC/b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:13,984 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/default/TestHBaseWalOnEC/b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:13,985 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:13,985 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:13,987 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:13,992 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/default/TestHBaseWalOnEC/b6c9278ed73a07c0d9040b9b77c7d2a9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T18:48:13,992 INFO [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened b6c9278ed73a07c0d9040b9b77c7d2a9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63372440, jitterRate=-0.055677056312561035}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T18:48:13,993 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:13,994 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for b6c9278ed73a07c0d9040b9b77c7d2a9: Running coprocessor pre-open hook at 1733770093977Writing region info on filesystem at 1733770093977Initializing all the Stores at 1733770093978 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770093978Cleaning up temporary data from old regions at 1733770093985 (+7 ms)Running coprocessor post-open hooks at 1733770093993 (+8 ms)Region opened successfully at 1733770093993 2024-12-09T18:48:13,996 INFO [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9., pid=6, masterSystemTime=1733770093965 2024-12-09T18:48:13,998 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:13,998 INFO [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:14,000 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=b6c9278ed73a07c0d9040b9b77c7d2a9, regionState=OPEN, openSeqNum=2, regionLocation=a437f8b9ba7d,39317,1733770091329 2024-12-09T18:48:14,006 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure b6c9278ed73a07c0d9040b9b77c7d2a9, server=a437f8b9ba7d,39317,1733770091329 because future has completed 2024-12-09T18:48:14,013 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T18:48:14,013 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure b6c9278ed73a07c0d9040b9b77c7d2a9, server=a437f8b9ba7d,39317,1733770091329 in 197 msec 2024-12-09T18:48:14,017 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T18:48:14,017 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=b6c9278ed73a07c0d9040b9b77c7d2a9, ASSIGN in 366 msec 2024-12-09T18:48:14,018 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T18:48:14,019 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733770094019"}]},"ts":"1733770094019"} 2024-12-09T18:48:14,022 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T18:48:14,024 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): 
pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T18:48:14,027 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 452 msec 2024-12-09T18:48:14,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T18:48:14,216 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T18:48:14,217 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T18:48:14,219 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T18:48:14,225 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T18:48:14,226 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T18:48:14,226 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-12-09T18:48:14,233 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9., hostname=a437f8b9ba7d,39317,1733770091329, seqNum=2] 2024-12-09T18:48:14,234 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T18:48:14,236 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40872, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T18:48:14,244 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T18:48:14,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T18:48:14,250 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T18:48:14,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T18:48:14,252 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T18:48:14,254 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T18:48:14,356 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T18:48:14,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39317 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T18:48:14,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:14,424 INFO [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing b6c9278ed73a07c0d9040b9b77c7d2a9 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T18:48:14,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/default/TestHBaseWalOnEC/b6c9278ed73a07c0d9040b9b77c7d2a9/.tmp/cf/80f4ded92534452d820deb89c1bc80f5 is 36, key is row/cf:cq/1733770094237/Put/seqid=0 2024-12-09T18:48:14,480 WARN [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:14,480 WARN [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:14,483 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_292230213_22 at /127.0.0.1:41968 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:36227:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41968 dst: /127.0.0.1:36227 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:14,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775648_1025 (size=4787) 2024-12-09T18:48:14,488 WARN [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T18:48:14,488 INFO [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/default/TestHBaseWalOnEC/b6c9278ed73a07c0d9040b9b77c7d2a9/.tmp/cf/80f4ded92534452d820deb89c1bc80f5 2024-12-09T18:48:14,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/default/TestHBaseWalOnEC/b6c9278ed73a07c0d9040b9b77c7d2a9/.tmp/cf/80f4ded92534452d820deb89c1bc80f5 as hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/default/TestHBaseWalOnEC/b6c9278ed73a07c0d9040b9b77c7d2a9/cf/80f4ded92534452d820deb89c1bc80f5 2024-12-09T18:48:14,544 INFO [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/default/TestHBaseWalOnEC/b6c9278ed73a07c0d9040b9b77c7d2a9/cf/80f4ded92534452d820deb89c1bc80f5, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T18:48:14,551 INFO [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for b6c9278ed73a07c0d9040b9b77c7d2a9 in 126ms, sequenceid=5, compaction requested=false 2024-12-09T18:48:14,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-12-09T18:48:14,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for b6c9278ed73a07c0d9040b9b77c7d2a9: 2024-12-09T18:48:14,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 
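[editor's sketch] The flush above persists a single 32-byte cell (row 'row', column 'cf:cq', visible in the HFileWriterImpl line) into an HFile under the region's cf directory. Below is a minimal sketch, assuming an open Connection named conn, of the client-side put followed by the admin flush that drives this sequence; the value bytes are a hypothetical placeholder.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class PutAndFlush {
  static void putAndFlush(Connection conn) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = conn.getTable(name); Admin admin = conn.getAdmin()) {
      // Row key, family and qualifier match the cell shown in the flush log above.
      Put put = new Put(Bytes.toBytes("row"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value"));
      table.put(put);    // written to the WAL and the memstore first
      admin.flush(name); // the master runs the FlushTableProcedure logged above
    }
  }
}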
2024-12-09T18:48:14,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T18:48:14,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T18:48:14,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T18:48:14,565 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T18:48:14,565 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 305 msec 2024-12-09T18:48:14,569 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 322 msec 2024-12-09T18:48:14,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32905 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T18:48:14,876 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T18:48:14,892 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T18:48:14,892 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T18:48:14,892 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at 
org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T18:48:14,896 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:14,896 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:14,897 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
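[editor's sketch] The connection-close call stack above is triggered by the test's tearDown shutting the mini-cluster down. A minimal sketch of that teardown shape follows, assuming a static HBaseTestingUtil field named UTIL that was started in the test setup and a JUnit 4 @AfterClass hook; the actual TestHBaseWalOnEC source may be organised differently.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class TearDownSketch {
  // Assumed to have been created and started (startMiniCluster) during test setup.
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDown() throws Exception {
    // Closes the shared async connection, stops the master and the region servers,
    // then shuts down the mini HDFS cluster, producing the shutdown log below.
    UTIL.shutdownMiniCluster();
  }
}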
2024-12-09T18:48:14,897 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T18:48:14,897 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=201166883, stopped=false 2024-12-09T18:48:14,897 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a437f8b9ba7d,32905,1733770090495 2024-12-09T18:48:14,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:14,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:14,934 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:14,934 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:14,934 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:14,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:14,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:14,935 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:14,935 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T18:48:14,936 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-12-09T18:48:14,936 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:14,936 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:14,936 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T18:48:14,937 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:14,937 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:14,937 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a437f8b9ba7d,35405,1733770091187' ***** 2024-12-09T18:48:14,937 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:14,937 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T18:48:14,937 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a437f8b9ba7d,35827,1733770091282' ***** 2024-12-09T18:48:14,937 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T18:48:14,938 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a437f8b9ba7d,39317,1733770091329' ***** 2024-12-09T18:48:14,938 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T18:48:14,938 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T18:48:14,938 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T18:48:14,939 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T18:48:14,939 INFO [RS:1;a437f8b9ba7d:35827 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T18:48:14,939 INFO [RS:0;a437f8b9ba7d:35405 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T18:48:14,939 INFO [RS:2;a437f8b9ba7d:39317 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T18:48:14,939 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T18:48:14,939 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T18:48:14,939 INFO [RS:2;a437f8b9ba7d:39317 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T18:48:14,939 INFO [RS:0;a437f8b9ba7d:35405 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T18:48:14,939 INFO [RS:1;a437f8b9ba7d:35827 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-09T18:48:14,939 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T18:48:14,939 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(959): stopping server a437f8b9ba7d,35827,1733770091282 2024-12-09T18:48:14,939 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(959): stopping server a437f8b9ba7d,35405,1733770091187 2024-12-09T18:48:14,939 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T18:48:14,939 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T18:48:14,939 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(3091): Received CLOSE for b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:14,939 INFO [RS:0;a437f8b9ba7d:35405 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a437f8b9ba7d:35405. 2024-12-09T18:48:14,939 INFO [RS:1;a437f8b9ba7d:35827 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;a437f8b9ba7d:35827. 2024-12-09T18:48:14,939 DEBUG [RS:0;a437f8b9ba7d:35405 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T18:48:14,939 DEBUG [RS:1;a437f8b9ba7d:35827 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at 
java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T18:48:14,939 DEBUG [RS:0;a437f8b9ba7d:35405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:14,940 DEBUG [RS:1;a437f8b9ba7d:35827 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:14,940 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(959): stopping server a437f8b9ba7d,39317,1733770091329 2024-12-09T18:48:14,940 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.HRegionServer(976): stopping server a437f8b9ba7d,35827,1733770091282; all regions closed. 2024-12-09T18:48:14,940 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T18:48:14,940 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T18:48:14,940 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T18:48:14,940 INFO [RS:2;a437f8b9ba7d:39317 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;a437f8b9ba7d:39317. 2024-12-09T18:48:14,940 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T18:48:14,940 DEBUG [RS:2;a437f8b9ba7d:39317 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T18:48:14,940 DEBUG [RS:2;a437f8b9ba7d:39317 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:14,940 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T18:48:14,940 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T18:48:14,940 DEBUG [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(1325): Online 
Regions={b6c9278ed73a07c0d9040b9b77c7d2a9=TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9.} 2024-12-09T18:48:14,941 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T18:48:14,941 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing b6c9278ed73a07c0d9040b9b77c7d2a9, disabling compactions & flushes 2024-12-09T18:48:14,941 DEBUG [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T18:48:14,941 INFO [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:14,941 DEBUG [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(1351): Waiting on b6c9278ed73a07c0d9040b9b77c7d2a9 2024-12-09T18:48:14,941 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:14,941 DEBUG [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T18:48:14,941 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. after waiting 0 ms 2024-12-09T18:48:14,941 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:14,941 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T18:48:14,941 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T18:48:14,941 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T18:48:14,942 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T18:48:14,942 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T18:48:14,942 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-09T18:48:14,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_1073741828_1018 (size=93) 2024-12-09T18:48:14,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_1073741828_1018 (size=93) 2024-12-09T18:48:14,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_1073741828_1018 (size=93) 2024-12-09T18:48:14,954 DEBUG [RS:1;a437f8b9ba7d:35827 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to 
/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/oldWALs 2024-12-09T18:48:14,954 INFO [RS:1;a437f8b9ba7d:35827 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a437f8b9ba7d%2C35827%2C1733770091282:(num 1733770092781) 2024-12-09T18:48:14,954 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/default/TestHBaseWalOnEC/b6c9278ed73a07c0d9040b9b77c7d2a9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T18:48:14,954 DEBUG [RS:1;a437f8b9ba7d:35827 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:14,955 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:14,955 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T18:48:14,955 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.ChoreService(370): Chore service for: regionserver/a437f8b9ba7d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T18:48:14,955 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T18:48:14,955 INFO [regionserver/a437f8b9ba7d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T18:48:14,955 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T18:48:14,955 INFO [RS:1;a437f8b9ba7d:35827 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T18:48:14,955 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T18:48:14,956 INFO [RS:1;a437f8b9ba7d:35827 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35827 2024-12-09T18:48:14,957 INFO [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 2024-12-09T18:48:14,957 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for b6c9278ed73a07c0d9040b9b77c7d2a9: Waiting for close lock at 1733770094940Running coprocessor pre-close hooks at 1733770094940Disabling compacts and flushes for region at 1733770094940Disabling writes for close at 1733770094941 (+1 ms)Writing region close event to WAL at 1733770094942 (+1 ms)Running coprocessor post-close hooks at 1733770094955 (+13 ms)Closed at 1733770094957 (+2 ms) 2024-12-09T18:48:14,957 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9. 
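[editor's sketch] The "Moved 1 WAL file(s) to .../oldWALs" lines above show each region server archiving its WAL as it shuts down. A minimal, purely illustrative sketch of listing what ends up under that archive directory; the NameNode address and path are copied from this log and are only valid while that mini-cluster is running.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListOldWals {
  public static void main(String[] args) throws Exception {
    // Archived-WAL directory taken from the log above.
    Path oldWals = new Path(
        "hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/oldWALs");
    try (FileSystem fs = oldWals.getFileSystem(new Configuration())) {
      for (FileStatus status : fs.listStatus(oldWals)) {
        System.out.println(status.getPath().getName() + " " + status.getLen() + " bytes");
      }
    }
  }
}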
2024-12-09T18:48:14,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a437f8b9ba7d,35827,1733770091282 2024-12-09T18:48:14,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T18:48:14,967 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T18:48:14,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a437f8b9ba7d,35827,1733770091282] 2024-12-09T18:48:14,972 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/.tmp/info/b92359de14d8418c827a5a5ce37ff598 is 153, key is TestHBaseWalOnEC,,1733770093565.b6c9278ed73a07c0d9040b9b77c7d2a9./info:regioninfo/1733770094000/Put/seqid=0 2024-12-09T18:48:14,975 WARN [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:14,975 WARN [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:14,978 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_763936367_22 at /127.0.0.1:59568 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:39629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59568 dst: /127.0.0.1:39629 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:14,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_-9223372036854775632_1027 (size=6637) 2024-12-09T18:48:14,982 WARN [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T18:48:14,982 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/.tmp/info/b92359de14d8418c827a5a5ce37ff598 2024-12-09T18:48:14,983 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a437f8b9ba7d,35827,1733770091282 already deleted, retry=false 2024-12-09T18:48:14,983 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a437f8b9ba7d,35827,1733770091282 expired; onlineServers=2 2024-12-09T18:48:15,005 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/.tmp/ns/acac69a6470147ff8827f51ac44a1cf8 is 43, key is default/ns:d/1733770093357/Put/seqid=0 2024-12-09T18:48:15,008 WARN [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:15,008 WARN [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:15,011 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_763936367_22 at /127.0.0.1:51508 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:37907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51508 dst: /127.0.0.1:37907 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:15,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_-9223372036854775616_1029 (size=5153) 2024-12-09T18:48:15,015 WARN [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T18:48:15,015 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/.tmp/ns/acac69a6470147ff8827f51ac44a1cf8 2024-12-09T18:48:15,018 INFO [regionserver/a437f8b9ba7d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:15,021 INFO [regionserver/a437f8b9ba7d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:15,021 INFO [regionserver/a437f8b9ba7d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:15,039 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/.tmp/table/491b5fa827e946cf8777c485e105a970 is 52, key is TestHBaseWalOnEC/table:state/1733770094019/Put/seqid=0 2024-12-09T18:48:15,042 WARN [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:15,042 WARN [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:15,045 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_763936367_22 at /127.0.0.1:51522 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:37907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51522 dst: /127.0.0.1:37907 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:15,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_-9223372036854775600_1031 (size=5249) 2024-12-09T18:48:15,050 WARN [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T18:48:15,050 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/.tmp/table/491b5fa827e946cf8777c485e105a970 2024-12-09T18:48:15,059 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/.tmp/info/b92359de14d8418c827a5a5ce37ff598 as hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/info/b92359de14d8418c827a5a5ce37ff598 2024-12-09T18:48:15,068 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/info/b92359de14d8418c827a5a5ce37ff598, entries=10, sequenceid=11, filesize=6.5 K 2024-12-09T18:48:15,070 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/.tmp/ns/acac69a6470147ff8827f51ac44a1cf8 as hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/ns/acac69a6470147ff8827f51ac44a1cf8 2024-12-09T18:48:15,075 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:15,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35827-0x1000c1f31990002, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:15,076 INFO [RS:1;a437f8b9ba7d:35827 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T18:48:15,076 INFO [RS:1;a437f8b9ba7d:35827 {}] 
regionserver.HRegionServer(1031): Exiting; stopping=a437f8b9ba7d,35827,1733770091282; zookeeper connection closed. 2024-12-09T18:48:15,076 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1c4d3e8c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1c4d3e8c 2024-12-09T18:48:15,080 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/ns/acac69a6470147ff8827f51ac44a1cf8, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T18:48:15,082 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/.tmp/table/491b5fa827e946cf8777c485e105a970 as hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/table/491b5fa827e946cf8777c485e105a970 2024-12-09T18:48:15,093 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/table/491b5fa827e946cf8777c485e105a970, entries=2, sequenceid=11, filesize=5.1 K 2024-12-09T18:48:15,095 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 152ms, sequenceid=11, compaction requested=false 2024-12-09T18:48:15,095 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-09T18:48:15,103 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T18:48:15,104 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T18:48:15,104 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T18:48:15,105 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733770094941Running coprocessor pre-close hooks at 1733770094941Disabling compacts and flushes for region at 1733770094941Disabling writes for close at 1733770094942 (+1 ms)Obtaining lock to block concurrent updates at 1733770094942Preparing flush snapshotting stores in 1588230740 at 1733770094942Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733770094943 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733770094944 (+1 ms)Flushing 1588230740/info: creating writer at 1733770094945 (+1 ms)Flushing 1588230740/info: appending metadata at 1733770094970 (+25 
ms)Flushing 1588230740/info: closing flushed file at 1733770094970Flushing 1588230740/ns: creating writer at 1733770094991 (+21 ms)Flushing 1588230740/ns: appending metadata at 1733770095004 (+13 ms)Flushing 1588230740/ns: closing flushed file at 1733770095004Flushing 1588230740/table: creating writer at 1733770095023 (+19 ms)Flushing 1588230740/table: appending metadata at 1733770095038 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733770095038Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d1b264b: reopening flushed file at 1733770095057 (+19 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1e5f0e01: reopening flushed file at 1733770095068 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bed6ab4: reopening flushed file at 1733770095080 (+12 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 152ms, sequenceid=11, compaction requested=false at 1733770095095 (+15 ms)Writing region close event to WAL at 1733770095096 (+1 ms)Running coprocessor post-close hooks at 1733770095104 (+8 ms)Closed at 1733770095104 2024-12-09T18:48:15,105 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T18:48:15,141 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(976): stopping server a437f8b9ba7d,39317,1733770091329; all regions closed. 2024-12-09T18:48:15,141 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(976): stopping server a437f8b9ba7d,35405,1733770091187; all regions closed. 2024-12-09T18:48:15,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_1073741829_1019 (size=2751) 2024-12-09T18:48:15,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_1073741827_1017 (size=1298) 2024-12-09T18:48:15,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_1073741829_1019 (size=2751) 2024-12-09T18:48:15,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_1073741827_1017 (size=1298) 2024-12-09T18:48:15,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_1073741827_1017 (size=1298) 2024-12-09T18:48:15,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_1073741829_1019 (size=2751) 2024-12-09T18:48:15,151 DEBUG [RS:2;a437f8b9ba7d:39317 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/oldWALs 2024-12-09T18:48:15,151 INFO [RS:2;a437f8b9ba7d:39317 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a437f8b9ba7d%2C39317%2C1733770091329:(num 1733770092781) 2024-12-09T18:48:15,151 DEBUG [RS:2;a437f8b9ba7d:39317 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:15,151 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:15,151 DEBUG [RS:0;a437f8b9ba7d:35405 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/oldWALs 2024-12-09T18:48:15,151 INFO [RS:0;a437f8b9ba7d:35405 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 
a437f8b9ba7d%2C35405%2C1733770091187.meta:.meta(num 1733770093195) 2024-12-09T18:48:15,151 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T18:48:15,151 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.ChoreService(370): Chore service for: regionserver/a437f8b9ba7d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-09T18:48:15,152 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T18:48:15,152 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T18:48:15,152 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T18:48:15,152 INFO [regionserver/a437f8b9ba7d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T18:48:15,152 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T18:48:15,152 INFO [RS:2;a437f8b9ba7d:39317 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39317 2024-12-09T18:48:15,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_1073741826_1016 (size=93) 2024-12-09T18:48:15,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_1073741826_1016 (size=93) 2024-12-09T18:48:15,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_1073741826_1016 (size=93) 2024-12-09T18:48:15,158 DEBUG [RS:0;a437f8b9ba7d:35405 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/oldWALs 2024-12-09T18:48:15,158 INFO [RS:0;a437f8b9ba7d:35405 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL a437f8b9ba7d%2C35405%2C1733770091187:(num 1733770092781) 2024-12-09T18:48:15,158 DEBUG [RS:0;a437f8b9ba7d:35405 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:15,158 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:15,158 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T18:48:15,158 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a437f8b9ba7d,39317,1733770091329 2024-12-09T18:48:15,158 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T18:48:15,158 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.ChoreService(370): Chore service for: regionserver/a437f8b9ba7d:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T18:48:15,158 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.HBaseServerBase(479): Close zookeeper 
2024-12-09T18:48:15,158 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T18:48:15,158 INFO [regionserver/a437f8b9ba7d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T18:48:15,158 INFO [RS:0;a437f8b9ba7d:35405 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35405 2024-12-09T18:48:15,166 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a437f8b9ba7d,39317,1733770091329] 2024-12-09T18:48:15,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a437f8b9ba7d,35405,1733770091187 2024-12-09T18:48:15,175 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T18:48:15,175 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T18:48:15,183 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a437f8b9ba7d,39317,1733770091329 already deleted, retry=false 2024-12-09T18:48:15,183 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a437f8b9ba7d,39317,1733770091329 expired; onlineServers=1 2024-12-09T18:48:15,192 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a437f8b9ba7d,35405,1733770091187] 2024-12-09T18:48:15,200 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a437f8b9ba7d,35405,1733770091187 already deleted, retry=false 2024-12-09T18:48:15,200 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a437f8b9ba7d,35405,1733770091187 expired; onlineServers=0 2024-12-09T18:48:15,200 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a437f8b9ba7d,32905,1733770090495' ***** 2024-12-09T18:48:15,201 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T18:48:15,201 INFO [M:0;a437f8b9ba7d:32905 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T18:48:15,201 INFO [M:0;a437f8b9ba7d:32905 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T18:48:15,201 DEBUG [M:0;a437f8b9ba7d:32905 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T18:48:15,202 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T18:48:15,202 DEBUG [M:0;a437f8b9ba7d:32905 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T18:48:15,202 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.small.0-1733770092378 {}] cleaner.HFileCleaner(306): Exit Thread[master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.small.0-1733770092378,5,FailOnTimeoutGroup] 2024-12-09T18:48:15,202 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.large.0-1733770092376 {}] cleaner.HFileCleaner(306): Exit Thread[master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.large.0-1733770092376,5,FailOnTimeoutGroup] 2024-12-09T18:48:15,202 INFO [M:0;a437f8b9ba7d:32905 {}] hbase.ChoreService(370): Chore service for: master/a437f8b9ba7d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T18:48:15,202 INFO [M:0;a437f8b9ba7d:32905 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T18:48:15,203 DEBUG [M:0;a437f8b9ba7d:32905 {}] master.HMaster(1795): Stopping service threads 2024-12-09T18:48:15,203 INFO [M:0;a437f8b9ba7d:32905 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T18:48:15,203 INFO [M:0;a437f8b9ba7d:32905 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T18:48:15,204 INFO [M:0;a437f8b9ba7d:32905 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T18:48:15,204 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T18:48:15,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T18:48:15,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:15,209 DEBUG [M:0;a437f8b9ba7d:32905 {}] zookeeper.ZKUtil(347): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T18:48:15,209 WARN [M:0;a437f8b9ba7d:32905 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T18:48:15,211 INFO [M:0;a437f8b9ba7d:32905 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/.lastflushedseqids 2024-12-09T18:48:15,223 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:15,223 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T18:48:15,227 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:59582 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:39629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59582 dst: /127.0.0.1:39629 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:15,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_-9223372036854775584_1033 (size=127) 2024-12-09T18:48:15,231 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T18:48:15,231 INFO [M:0;a437f8b9ba7d:32905 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T18:48:15,231 INFO [M:0;a437f8b9ba7d:32905 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T18:48:15,232 DEBUG [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T18:48:15,232 INFO [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T18:48:15,232 DEBUG [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T18:48:15,232 DEBUG [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T18:48:15,232 DEBUG [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T18:48:15,232 INFO [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-12-09T18:48:15,249 DEBUG [M:0;a437f8b9ba7d:32905 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/69a4a2dd035346b9a5d61c13fecb75da is 82, key is hbase:meta,,1/info:regioninfo/1733770093289/Put/seqid=0 2024-12-09T18:48:15,251 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:15,251 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:15,254 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:59600 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:39629:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59600 dst: /127.0.0.1:39629 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:15,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_-9223372036854775568_1035 (size=5672) 2024-12-09T18:48:15,259 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T18:48:15,259 INFO [M:0;a437f8b9ba7d:32905 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/69a4a2dd035346b9a5d61c13fecb75da 2024-12-09T18:48:15,267 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:15,267 INFO [RS:2;a437f8b9ba7d:39317 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T18:48:15,267 DEBUG [pool-77-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39317-0x1000c1f31990003, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:15,267 INFO [RS:2;a437f8b9ba7d:39317 {}] regionserver.HRegionServer(1031): Exiting; stopping=a437f8b9ba7d,39317,1733770091329; zookeeper connection closed. 2024-12-09T18:48:15,267 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2e8c4e4c {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2e8c4e4c 2024-12-09T18:48:15,282 DEBUG [M:0;a437f8b9ba7d:32905 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/125c061ad20749f6bfe093c19f384779 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733770094026/Put/seqid=0 2024-12-09T18:48:15,284 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:15,284 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:15,287 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:51542 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:37907:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51542 dst: /127.0.0.1:37907 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:15,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_-9223372036854775552_1037 (size=6439) 2024-12-09T18:48:15,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:15,292 INFO [RS:0;a437f8b9ba7d:35405 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T18:48:15,292 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-12-09T18:48:15,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35405-0x1000c1f31990001, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:15,292 INFO [RS:0;a437f8b9ba7d:35405 {}] regionserver.HRegionServer(1031): Exiting; stopping=a437f8b9ba7d,35405,1733770091187; zookeeper connection closed. 2024-12-09T18:48:15,292 INFO [M:0;a437f8b9ba7d:32905 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/125c061ad20749f6bfe093c19f384779 2024-12-09T18:48:15,292 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6cc128db {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6cc128db 2024-12-09T18:48:15,293 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T18:48:15,313 DEBUG [M:0;a437f8b9ba7d:32905 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f2d9ab69ff0d47fa8a01f5d262b3b776 is 69, key is a437f8b9ba7d,35405,1733770091187/rs:state/1733770092505/Put/seqid=0 2024-12-09T18:48:15,315 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-12-09T18:48:15,315 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-12-09T18:48:15,318 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1826204795_22 at /127.0.0.1:41998 [Receiving block BP-780308751-172.17.0.2-1733770086886:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:36227:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41998 dst: /127.0.0.1:36227 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-09T18:48:15,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_-9223372036854775536_1039 (size=5294) 2024-12-09T18:48:15,322 WARN [M:0;a437f8b9ba7d:32905 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-12-09T18:48:15,322 INFO [M:0;a437f8b9ba7d:32905 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f2d9ab69ff0d47fa8a01f5d262b3b776 2024-12-09T18:48:15,333 DEBUG [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/69a4a2dd035346b9a5d61c13fecb75da as hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/69a4a2dd035346b9a5d61c13fecb75da 2024-12-09T18:48:15,342 INFO [M:0;a437f8b9ba7d:32905 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/69a4a2dd035346b9a5d61c13fecb75da, entries=8, sequenceid=72, filesize=5.5 K 2024-12-09T18:48:15,345 DEBUG [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/125c061ad20749f6bfe093c19f384779 as hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/125c061ad20749f6bfe093c19f384779 2024-12-09T18:48:15,352 INFO [M:0;a437f8b9ba7d:32905 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/125c061ad20749f6bfe093c19f384779, entries=8, sequenceid=72, filesize=6.3 K 2024-12-09T18:48:15,354 DEBUG [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/f2d9ab69ff0d47fa8a01f5d262b3b776 as hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f2d9ab69ff0d47fa8a01f5d262b3b776 2024-12-09T18:48:15,360 INFO [M:0;a437f8b9ba7d:32905 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/f2d9ab69ff0d47fa8a01f5d262b3b776, entries=3, sequenceid=72, filesize=5.2 K 2024-12-09T18:48:15,362 INFO [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=72, compaction requested=false 2024-12-09T18:48:15,363 INFO [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T18:48:15,364 DEBUG [M:0;a437f8b9ba7d:32905 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733770095232Disabling compacts and flushes for region at 1733770095232Disabling writes for close at 1733770095232Obtaining lock to block concurrent updates at 1733770095232Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733770095232Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1733770095232Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733770095233 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733770095233Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733770095249 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733770095249Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733770095266 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733770095282 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733770095282Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733770095299 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733770095313 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733770095313Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@26f7edc1: reopening flushed file at 1733770095331 (+18 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b075d6f: reopening flushed file at 1733770095343 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fc7592: reopening flushed file at 1733770095352 (+9 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 130ms, sequenceid=72, compaction requested=false at 1733770095362 (+10 ms)Writing region close event to WAL at 1733770095363 (+1 ms)Closed at 1733770095363 2024-12-09T18:48:15,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37907 is added to blk_1073741825_1011 (size=32674) 2024-12-09T18:48:15,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36227 is added to blk_1073741825_1011 (size=32674) 2024-12-09T18:48:15,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39629 is added to blk_1073741825_1011 (size=32674) 2024-12-09T18:48:15,368 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T18:48:15,368 INFO [M:0;a437f8b9ba7d:32905 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-12-09T18:48:15,368 INFO [M:0;a437f8b9ba7d:32905 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32905 2024-12-09T18:48:15,368 INFO [M:0;a437f8b9ba7d:32905 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T18:48:15,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:15,475 INFO [M:0;a437f8b9ba7d:32905 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T18:48:15,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32905-0x1000c1f31990000, quorum=127.0.0.1:51741, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:15,485 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2e59159d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:15,487 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@a8e922f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T18:48:15,488 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T18:48:15,488 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@24f92c39{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T18:48:15,488 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c62369b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.log.dir/,STOPPED} 2024-12-09T18:48:15,490 WARN [BP-780308751-172.17.0.2-1733770086886 heartbeating to localhost/127.0.0.1:42057 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T18:48:15,490 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T18:48:15,490 WARN [BP-780308751-172.17.0.2-1733770086886 heartbeating to localhost/127.0.0.1:42057 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-780308751-172.17.0.2-1733770086886 (Datanode Uuid 2af04275-d0fe-4d39-be17-e6822b952cee) service to localhost/127.0.0.1:42057 2024-12-09T18:48:15,490 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T18:48:15,491 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data5/current/BP-780308751-172.17.0.2-1733770086886 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:15,492 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data6/current/BP-780308751-172.17.0.2-1733770086886 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:15,492 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T18:48:15,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c6b8f01{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:15,494 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11f28dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T18:48:15,494 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T18:48:15,494 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7fa8fa5c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T18:48:15,495 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6463ad04{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.log.dir/,STOPPED} 2024-12-09T18:48:15,496 WARN [BP-780308751-172.17.0.2-1733770086886 heartbeating to localhost/127.0.0.1:42057 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T18:48:15,496 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T18:48:15,496 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T18:48:15,496 WARN [BP-780308751-172.17.0.2-1733770086886 heartbeating to localhost/127.0.0.1:42057 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-780308751-172.17.0.2-1733770086886 (Datanode Uuid 033ac5c8-e995-42b6-b8f7-41cb62aa8932) service to localhost/127.0.0.1:42057 2024-12-09T18:48:15,496 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data3/current/BP-780308751-172.17.0.2-1733770086886 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:15,497 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data4/current/BP-780308751-172.17.0.2-1733770086886 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:15,497 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T18:48:15,499 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4839957b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:15,500 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5306f615{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T18:48:15,500 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T18:48:15,500 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1a2478ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T18:48:15,500 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@550154bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.log.dir/,STOPPED} 2024-12-09T18:48:15,501 WARN [BP-780308751-172.17.0.2-1733770086886 heartbeating to localhost/127.0.0.1:42057 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T18:48:15,501 WARN [BP-780308751-172.17.0.2-1733770086886 heartbeating to localhost/127.0.0.1:42057 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-780308751-172.17.0.2-1733770086886 (Datanode Uuid ad607743-4c54-48b4-8e12-1a0c325ff579) service to localhost/127.0.0.1:42057 2024-12-09T18:48:15,502 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T18:48:15,502 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data1/current/BP-780308751-172.17.0.2-1733770086886 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:15,502 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T18:48:15,502 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/cluster_be9d1db5-c4ef-450b-77ca-5d9b12c28ce0/data/data2/current/BP-780308751-172.17.0.2-1733770086886 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:15,502 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T18:48:15,512 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T18:48:15,513 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T18:48:15,513 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T18:48:15,513 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T18:48:15,514 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.log.dir/,STOPPED} 2024-12-09T18:48:15,523 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T18:48:15,547 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T18:48:15,555 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=92 (was 161), OpenFileDescriptor=437 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=329 (was 349), ProcessCount=11 (was 11), AvailableMemoryMB=2803 (was 3098) 2024-12-09T18:48:15,562 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=92, OpenFileDescriptor=437, MaxFileDescriptor=1048576, SystemLoadAverage=329, ProcessCount=11, AvailableMemoryMB=2803 2024-12-09T18:48:15,562 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-09T18:48:15,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.log.dir so I do NOT create it in target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c 2024-12-09T18:48:15,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/949187c9-fcaa-9fad-90d1-73b631b15f28/hadoop.tmp.dir so I do NOT create it in target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c 2024-12-09T18:48:15,563 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3, deleteOnExit=true 2024-12-09T18:48:15,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-12-09T18:48:15,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/test.cache.data in system properties and HBase conf 2024-12-09T18:48:15,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/hadoop.tmp.dir in system properties and HBase conf 2024-12-09T18:48:15,563 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/hadoop.log.dir in system properties and HBase conf 2024-12-09T18:48:15,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-09T18:48:15,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-09T18:48:15,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-12-09T18:48:15,564 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-09T18:48:15,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-09T18:48:15,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-09T18:48:15,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-09T18:48:15,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T18:48:15,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-09T18:48:15,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-09T18:48:15,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-09T18:48:15,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T18:48:15,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-09T18:48:15,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/nfs.dump.dir in system properties and HBase conf 2024-12-09T18:48:15,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/java.io.tmpdir in system properties and HBase conf 2024-12-09T18:48:15,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-09T18:48:15,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-09T18:48:15,565 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-09T18:48:15,870 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T18:48:15,875 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T18:48:15,876 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T18:48:15,876 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T18:48:15,876 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T18:48:15,877 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T18:48:15,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16eaa68d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/hadoop.log.dir/,AVAILABLE} 2024-12-09T18:48:15,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62802e4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T18:48:15,972 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3c97821d{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/java.io.tmpdir/jetty-localhost-44345-hadoop-hdfs-3_4_1-tests_jar-_-any-5727298596089201758/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T18:48:15,972 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@544c0dd2{HTTP/1.1, (http/1.1)}{localhost:44345} 2024-12-09T18:48:15,972 INFO [Time-limited test {}] server.Server(415): Started @10718ms 2024-12-09T18:48:16,147 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T18:48:16,151 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T18:48:16,151 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T18:48:16,151 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T18:48:16,151 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-09T18:48:16,152 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61d23bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/hadoop.log.dir/,AVAILABLE} 2024-12-09T18:48:16,152 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20a0e688{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T18:48:16,241 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7eeef71e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/java.io.tmpdir/jetty-localhost-34965-hadoop-hdfs-3_4_1-tests_jar-_-any-12310721780421985227/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:16,242 INFO [Time-limited test {}] 
server.AbstractConnector(333): Started ServerConnector@70feba44{HTTP/1.1, (http/1.1)}{localhost:34965} 2024-12-09T18:48:16,242 INFO [Time-limited test {}] server.Server(415): Started @10988ms 2024-12-09T18:48:16,243 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T18:48:16,272 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T18:48:16,275 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T18:48:16,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T18:48:16,276 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T18:48:16,276 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T18:48:16,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4e5afbc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/hadoop.log.dir/,AVAILABLE} 2024-12-09T18:48:16,277 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1b0441b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T18:48:16,365 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@30bdc6f8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/java.io.tmpdir/jetty-localhost-34981-hadoop-hdfs-3_4_1-tests_jar-_-any-5198486045722397597/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:16,365 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5547eae9{HTTP/1.1, (http/1.1)}{localhost:34981} 2024-12-09T18:48:16,366 INFO [Time-limited test {}] server.Server(415): Started @11111ms 2024-12-09T18:48:16,367 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T18:48:16,392 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-09T18:48:16,396 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-09T18:48:16,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-09T18:48:16,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-09T18:48:16,396 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-09T18:48:16,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4c77de1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/hadoop.log.dir/,AVAILABLE} 2024-12-09T18:48:16,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@438bc7ce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-09T18:48:16,490 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@44672b71{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/java.io.tmpdir/jetty-localhost-34115-hadoop-hdfs-3_4_1-tests_jar-_-any-15763356373001689909/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:16,490 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@36a9ca95{HTTP/1.1, (http/1.1)}{localhost:34115} 2024-12-09T18:48:16,490 INFO [Time-limited test {}] server.Server(415): Started @11236ms 2024-12-09T18:48:16,492 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-09T18:48:16,818 WARN [Thread-539 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data1/current/BP-1051587299-172.17.0.2-1733770095587/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:16,818 WARN [Thread-540 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data2/current/BP-1051587299-172.17.0.2-1733770095587/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:16,838 WARN [Thread-481 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T18:48:16,840 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf3ade5a291197e99 with lease ID 0xfc36d0148469c900: Processing first storage report for DS-a6bfc84b-18e7-4e36-8d3e-1492c1419d66 from datanode DatanodeRegistration(127.0.0.1:42521, datanodeUuid=fa37c143-69b9-426e-9f3a-909bc46c5d1a, infoPort=36399, infoSecurePort=0, ipcPort=37561, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587) 2024-12-09T18:48:16,840 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3ade5a291197e99 with lease ID 0xfc36d0148469c900: from storage DS-a6bfc84b-18e7-4e36-8d3e-1492c1419d66 node DatanodeRegistration(127.0.0.1:42521, datanodeUuid=fa37c143-69b9-426e-9f3a-909bc46c5d1a, infoPort=36399, infoSecurePort=0, ipcPort=37561, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T18:48:16,841 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf3ade5a291197e99 with lease ID 0xfc36d0148469c900: Processing first storage report for DS-13613a5e-4c67-41aa-bca2-4ed7bd52d597 from datanode DatanodeRegistration(127.0.0.1:42521, datanodeUuid=fa37c143-69b9-426e-9f3a-909bc46c5d1a, infoPort=36399, infoSecurePort=0, ipcPort=37561, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587) 2024-12-09T18:48:16,841 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf3ade5a291197e99 with lease ID 0xfc36d0148469c900: from storage DS-13613a5e-4c67-41aa-bca2-4ed7bd52d597 node DatanodeRegistration(127.0.0.1:42521, datanodeUuid=fa37c143-69b9-426e-9f3a-909bc46c5d1a, infoPort=36399, infoSecurePort=0, ipcPort=37561, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T18:48:17,026 WARN [Thread-552 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data3/current/BP-1051587299-172.17.0.2-1733770095587/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:17,026 WARN [Thread-553 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data4/current/BP-1051587299-172.17.0.2-1733770095587/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:17,039 WARN [Thread-504 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T18:48:17,042 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x18ade6cb28741a4a with lease ID 0xfc36d0148469c901: Processing first storage report for DS-cbc57229-2e3c-4635-8de0-0a530db5a3be from datanode DatanodeRegistration(127.0.0.1:45639, datanodeUuid=9e3a0b98-2f61-48b7-82b0-0ea68294af69, infoPort=40165, infoSecurePort=0, ipcPort=40273, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587) 2024-12-09T18:48:17,042 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18ade6cb28741a4a with lease ID 0xfc36d0148469c901: from storage DS-cbc57229-2e3c-4635-8de0-0a530db5a3be node DatanodeRegistration(127.0.0.1:45639, datanodeUuid=9e3a0b98-2f61-48b7-82b0-0ea68294af69, infoPort=40165, infoSecurePort=0, ipcPort=40273, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T18:48:17,042 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x18ade6cb28741a4a with lease ID 0xfc36d0148469c901: Processing first storage report for DS-e87ad345-d522-4504-9a00-3ffbdcb98954 from datanode DatanodeRegistration(127.0.0.1:45639, datanodeUuid=9e3a0b98-2f61-48b7-82b0-0ea68294af69, infoPort=40165, infoSecurePort=0, ipcPort=40273, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587) 2024-12-09T18:48:17,042 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x18ade6cb28741a4a with lease ID 0xfc36d0148469c901: from storage DS-e87ad345-d522-4504-9a00-3ffbdcb98954 node DatanodeRegistration(127.0.0.1:45639, datanodeUuid=9e3a0b98-2f61-48b7-82b0-0ea68294af69, infoPort=40165, infoSecurePort=0, ipcPort=40273, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T18:48:17,157 WARN [Thread-563 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data5/current/BP-1051587299-172.17.0.2-1733770095587/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:17,158 WARN [Thread-564 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data6/current/BP-1051587299-172.17.0.2-1733770095587/current, will proceed with Du for space computation calculation, 2024-12-09T18:48:17,176 WARN [Thread-527 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-09T18:48:17,179 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc0bd3297762f3f78 with lease ID 0xfc36d0148469c902: Processing first storage report for DS-b84534a7-df3c-4d23-b588-e64e8459b546 from datanode DatanodeRegistration(127.0.0.1:44265, datanodeUuid=b44ebac1-feff-4fc1-bec6-08cb852937b0, infoPort=39661, infoSecurePort=0, ipcPort=43205, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587) 2024-12-09T18:48:17,179 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc0bd3297762f3f78 with lease ID 0xfc36d0148469c902: from storage DS-b84534a7-df3c-4d23-b588-e64e8459b546 node DatanodeRegistration(127.0.0.1:44265, datanodeUuid=b44ebac1-feff-4fc1-bec6-08cb852937b0, infoPort=39661, infoSecurePort=0, ipcPort=43205, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-09T18:48:17,179 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc0bd3297762f3f78 with lease ID 0xfc36d0148469c902: Processing first storage report for DS-79cfefa6-cd4e-4e8e-b571-32de31618b40 from datanode DatanodeRegistration(127.0.0.1:44265, datanodeUuid=b44ebac1-feff-4fc1-bec6-08cb852937b0, infoPort=39661, infoSecurePort=0, ipcPort=43205, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587) 2024-12-09T18:48:17,179 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc0bd3297762f3f78 with lease ID 0xfc36d0148469c902: from storage DS-79cfefa6-cd4e-4e8e-b571-32de31618b40 node DatanodeRegistration(127.0.0.1:44265, datanodeUuid=b44ebac1-feff-4fc1-bec6-08cb852937b0, infoPort=39661, infoSecurePort=0, ipcPort=43205, storageInfo=lv=-57;cid=testClusterID;nsid=389225399;c=1733770095587), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-09T18:48:17,223 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c 2024-12-09T18:48:17,228 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/zookeeper_0, clientPort=60447, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-09T18:48:17,230 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60447 2024-12-09T18:48:17,230 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:17,232 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:17,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741825_1001 (size=7) 2024-12-09T18:48:17,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741825_1001 (size=7) 2024-12-09T18:48:17,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741825_1001 (size=7) 2024-12-09T18:48:17,246 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878 with version=8 2024-12-09T18:48:17,246 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:42057/user/jenkins/test-data/d4b078ed-972d-e946-da02-d24fc83a4a95/hbase-staging 2024-12-09T18:48:17,248 INFO [Time-limited test {}] client.ConnectionUtils(128): master/a437f8b9ba7d:0 server-side Connection retries=45 2024-12-09T18:48:17,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,248 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,248 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T18:48:17,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,249 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T18:48:17,249 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-12-09T18:48:17,249 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T18:48:17,249 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41833 2024-12-09T18:48:17,251 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41833 connecting to ZooKeeper ensemble=127.0.0.1:60447 2024-12-09T18:48:17,301 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:418330x0, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T18:48:17,302 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41833-0x1000c1f4ecb0000 connected 2024-12-09T18:48:17,376 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block 
reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:17,379 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:17,383 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:17,383 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878, hbase.cluster.distributed=false 2024-12-09T18:48:17,385 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T18:48:17,386 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41833 2024-12-09T18:48:17,386 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41833 2024-12-09T18:48:17,386 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41833 2024-12-09T18:48:17,387 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41833 2024-12-09T18:48:17,387 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41833 2024-12-09T18:48:17,401 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a437f8b9ba7d:0 server-side Connection retries=45 2024-12-09T18:48:17,401 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,401 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,401 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T18:48:17,401 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,401 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T18:48:17,401 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T18:48:17,401 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T18:48:17,402 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35043 2024-12-09T18:48:17,403 INFO [Time-limited test {}] 
zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35043 connecting to ZooKeeper ensemble=127.0.0.1:60447 2024-12-09T18:48:17,404 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:17,405 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:17,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350430x0, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T18:48:17,417 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35043-0x1000c1f4ecb0001 connected 2024-12-09T18:48:17,417 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:17,417 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T18:48:17,418 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T18:48:17,419 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T18:48:17,420 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T18:48:17,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35043 2024-12-09T18:48:17,421 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35043 2024-12-09T18:48:17,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35043 2024-12-09T18:48:17,422 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35043 2024-12-09T18:48:17,423 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35043 2024-12-09T18:48:17,440 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a437f8b9ba7d:0 server-side Connection retries=45 2024-12-09T18:48:17,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,440 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T18:48:17,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,440 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T18:48:17,440 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T18:48:17,441 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T18:48:17,441 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42855 2024-12-09T18:48:17,442 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42855 connecting to ZooKeeper ensemble=127.0.0.1:60447 2024-12-09T18:48:17,443 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:17,444 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:17,450 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:428550x0, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T18:48:17,450 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42855-0x1000c1f4ecb0002 connected 2024-12-09T18:48:17,450 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:17,451 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T18:48:17,451 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T18:48:17,452 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T18:48:17,453 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T18:48:17,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42855 2024-12-09T18:48:17,453 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42855 2024-12-09T18:48:17,454 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42855 2024-12-09T18:48:17,454 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42855 2024-12-09T18:48:17,454 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42855 2024-12-09T18:48:17,469 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/a437f8b9ba7d:0 server-side Connection retries=45 2024-12-09T18:48:17,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,469 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-09T18:48:17,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-09T18:48:17,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-09T18:48:17,469 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-09T18:48:17,469 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-09T18:48:17,470 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43343 2024-12-09T18:48:17,471 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43343 connecting to ZooKeeper ensemble=127.0.0.1:60447 2024-12-09T18:48:17,472 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:17,473 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:17,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:433430x0, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-09T18:48:17,484 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43343-0x1000c1f4ecb0003 connected 2024-12-09T18:48:17,484 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:17,484 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-09T18:48:17,485 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-09T18:48:17,486 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-09T18:48:17,487 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-09T18:48:17,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43343 2024-12-09T18:48:17,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43343 2024-12-09T18:48:17,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43343 2024-12-09T18:48:17,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43343 2024-12-09T18:48:17,489 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43343 2024-12-09T18:48:17,501 DEBUG [M:0;a437f8b9ba7d:41833 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;a437f8b9ba7d:41833 2024-12-09T18:48:17,501 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/a437f8b9ba7d,41833,1733770097248 2024-12-09T18:48:17,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:17,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:17,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:17,508 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:17,509 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/a437f8b9ba7d,41833,1733770097248 2024-12-09T18:48:17,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T18:48:17,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T18:48:17,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,516 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-09T18:48:17,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,517 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-09T18:48:17,518 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/a437f8b9ba7d,41833,1733770097248 from backup master directory 2024-12-09T18:48:17,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/a437f8b9ba7d,41833,1733770097248 2024-12-09T18:48:17,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:17,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:17,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:17,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-09T18:48:17,525 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T18:48:17,525 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=a437f8b9ba7d,41833,1733770097248 2024-12-09T18:48:17,533 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/hbase.id] with ID: 14f869c9-8304-4845-b6a5-76c946263a5d 2024-12-09T18:48:17,533 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/.tmp/hbase.id 2024-12-09T18:48:17,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741826_1002 (size=42) 2024-12-09T18:48:17,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741826_1002 (size=42) 2024-12-09T18:48:17,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741826_1002 (size=42) 2024-12-09T18:48:17,544 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/.tmp/hbase.id]:[hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/hbase.id] 2024-12-09T18:48:17,560 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-09T18:48:17,560 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-12-09T18:48:17,562 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-12-09T18:48:17,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,567 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741827_1003 (size=196) 2024-12-09T18:48:17,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741827_1003 (size=196) 2024-12-09T18:48:17,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741827_1003 (size=196) 2024-12-09T18:48:17,578 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T18:48:17,579 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-09T18:48:17,579 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T18:48:17,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is 
added to blk_1073741828_1004 (size=1189) 2024-12-09T18:48:17,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741828_1004 (size=1189) 2024-12-09T18:48:17,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741828_1004 (size=1189) 2024-12-09T18:48:17,594 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store 2024-12-09T18:48:17,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741829_1005 (size=34) 2024-12-09T18:48:17,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741829_1005 (size=34) 2024-12-09T18:48:17,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741829_1005 (size=34) 2024-12-09T18:48:17,607 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:17,607 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T18:48:17,607 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T18:48:17,607 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T18:48:17,607 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T18:48:17,607 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T18:48:17,607 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T18:48:17,607 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733770097607Disabling compacts and flushes for region at 1733770097607Disabling writes for close at 1733770097607Writing region close event to WAL at 1733770097607Closed at 1733770097607 2024-12-09T18:48:17,608 WARN [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/.initializing 2024-12-09T18:48:17,608 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/WALs/a437f8b9ba7d,41833,1733770097248 2024-12-09T18:48:17,612 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a437f8b9ba7d%2C41833%2C1733770097248, suffix=, logDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/WALs/a437f8b9ba7d,41833,1733770097248, archiveDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/oldWALs, maxLogs=10 2024-12-09T18:48:17,613 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor a437f8b9ba7d%2C41833%2C1733770097248.1733770097613 2024-12-09T18:48:17,624 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/WALs/a437f8b9ba7d,41833,1733770097248/a437f8b9ba7d%2C41833%2C1733770097248.1733770097613 2024-12-09T18:48:17,628 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40165:40165),(127.0.0.1/127.0.0.1:36399:36399),(127.0.0.1/127.0.0.1:39661:39661)] 2024-12-09T18:48:17,629 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-09T18:48:17,629 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:17,629 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,629 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,631 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-09T18:48:17,633 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:17,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:17,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,636 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-09T18:48:17,636 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:17,637 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T18:48:17,637 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,640 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-09T18:48:17,640 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:17,640 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T18:48:17,641 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,642 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-09T18:48:17,643 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:17,643 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T18:48:17,644 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,645 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,645 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,647 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,647 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,647 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T18:48:17,649 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-09T18:48:17,652 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T18:48:17,652 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69493579, jitterRate=0.035535022616386414}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T18:48:17,653 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1733770097629Initializing all the Stores at 1733770097631 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770097631Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770097631Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770097631Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770097631Cleaning up temporary data from old regions at 1733770097647 (+16 ms)Region opened successfully at 1733770097653 (+6 ms) 2024-12-09T18:48:17,654 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-09T18:48:17,658 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@197b0adb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a437f8b9ba7d/172.17.0.2:0 2024-12-09T18:48:17,659 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-12-09T18:48:17,659 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-09T18:48:17,660 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-09T18:48:17,660 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-09T18:48:17,660 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-12-09T18:48:17,661 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-12-09T18:48:17,661 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-09T18:48:17,664 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-09T18:48:17,665 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-09T18:48:17,675 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-12-09T18:48:17,675 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-09T18:48:17,676 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-09T18:48:17,683 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-12-09T18:48:17,684 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-09T18:48:17,685 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-09T18:48:17,691 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-12-09T18:48:17,693 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-09T18:48:17,700 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-12-09T18:48:17,702 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-09T18:48:17,708 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-09T18:48:17,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:17,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:17,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:17,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, 
path=/hbase/running 2024-12-09T18:48:17,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,717 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=a437f8b9ba7d,41833,1733770097248, sessionid=0x1000c1f4ecb0000, setting cluster-up flag (Was=false) 2024-12-09T18:48:17,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,758 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-09T18:48:17,760 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a437f8b9ba7d,41833,1733770097248 2024-12-09T18:48:17,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,775 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:17,800 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-09T18:48:17,801 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=a437f8b9ba7d,41833,1733770097248 2024-12-09T18:48:17,803 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-12-09T18:48:17,805 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-12-09T18:48:17,806 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-12-09T18:48:17,806 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-09T18:48:17,806 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: a437f8b9ba7d,41833,1733770097248 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-09T18:48:17,808 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/a437f8b9ba7d:0, corePoolSize=5, maxPoolSize=5 2024-12-09T18:48:17,808 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/a437f8b9ba7d:0, corePoolSize=5, maxPoolSize=5 2024-12-09T18:48:17,808 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/a437f8b9ba7d:0, corePoolSize=5, maxPoolSize=5 2024-12-09T18:48:17,808 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/a437f8b9ba7d:0, corePoolSize=5, maxPoolSize=5 2024-12-09T18:48:17,808 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/a437f8b9ba7d:0, corePoolSize=10, maxPoolSize=10 2024-12-09T18:48:17,808 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,808 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/a437f8b9ba7d:0, corePoolSize=2, maxPoolSize=2 2024-12-09T18:48:17,808 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,809 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733770127809 2024-12-09T18:48:17,809 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-09T18:48:17,810 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-09T18:48:17,810 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-09T18:48:17,810 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-09T18:48:17,810 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-09T18:48:17,810 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-09T18:48:17,810 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,811 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-09T18:48:17,811 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-09T18:48:17,811 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-09T18:48:17,811 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T18:48:17,811 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-12-09T18:48:17,811 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-09T18:48:17,811 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-09T18:48:17,811 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.large.0-1733770097811,5,FailOnTimeoutGroup] 2024-12-09T18:48:17,812 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.small.0-1733770097811,5,FailOnTimeoutGroup] 2024-12-09T18:48:17,812 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,812 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-09T18:48:17,812 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,812 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-12-09T18:48:17,812 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:17,813 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-09T18:48:17,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741831_1007 (size=1321) 2024-12-09T18:48:17,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741831_1007 (size=1321) 2024-12-09T18:48:17,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741831_1007 (size=1321) 2024-12-09T18:48:17,822 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-12-09T18:48:17,822 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878 2024-12-09T18:48:17,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741832_1008 (size=32) 2024-12-09T18:48:17,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741832_1008 (size=32) 2024-12-09T18:48:17,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741832_1008 (size=32) 2024-12-09T18:48:17,831 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:17,833 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T18:48:17,834 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T18:48:17,834 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:17,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:17,835 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T18:48:17,836 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T18:48:17,836 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:17,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:17,837 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T18:48:17,838 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T18:48:17,838 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:17,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:17,839 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T18:48:17,840 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T18:48:17,840 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:17,841 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:17,841 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T18:48:17,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740 2024-12-09T18:48:17,842 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740 2024-12-09T18:48:17,844 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T18:48:17,844 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T18:48:17,844 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T18:48:17,846 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T18:48:17,848 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T18:48:17,848 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59090384, jitterRate=-0.11948466300964355}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T18:48:17,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1733770097831Initializing all the Stores at 1733770097832 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770097832Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770097832Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770097833 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770097833Cleaning up temporary data from old regions at 1733770097844 (+11 ms)Region opened successfully at 1733770097849 (+5 ms) 2024-12-09T18:48:17,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T18:48:17,849 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T18:48:17,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T18:48:17,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T18:48:17,849 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T18:48:17,850 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T18:48:17,850 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733770097849Disabling compacts and flushes for region at 1733770097849Disabling writes for close at 1733770097849Writing region close event to WAL at 1733770097850 (+1 ms)Closed at 1733770097850 2024-12-09T18:48:17,852 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T18:48:17,852 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-12-09T18:48:17,852 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-09T18:48:17,853 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T18:48:17,855 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-09T18:48:17,891 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.HRegionServer(746): ClusterId : 14f869c9-8304-4845-b6a5-76c946263a5d 2024-12-09T18:48:17,891 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(746): ClusterId : 14f869c9-8304-4845-b6a5-76c946263a5d 2024-12-09T18:48:17,891 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(746): ClusterId : 14f869c9-8304-4845-b6a5-76c946263a5d 2024-12-09T18:48:17,891 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 
2024-12-09T18:48:17,892 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T18:48:17,892 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-09T18:48:17,901 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T18:48:17,901 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T18:48:17,901 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-09T18:48:17,901 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T18:48:17,901 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T18:48:17,902 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-09T18:48:17,909 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T18:48:17,909 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T18:48:17,909 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-09T18:48:17,910 DEBUG [RS:1;a437f8b9ba7d:42855 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ff4fd75, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a437f8b9ba7d/172.17.0.2:0 2024-12-09T18:48:17,910 DEBUG [RS:2;a437f8b9ba7d:43343 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65f12a22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a437f8b9ba7d/172.17.0.2:0 2024-12-09T18:48:17,910 DEBUG [RS:0;a437f8b9ba7d:35043 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25009b22, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=a437f8b9ba7d/172.17.0.2:0 2024-12-09T18:48:17,925 DEBUG [RS:2;a437f8b9ba7d:43343 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;a437f8b9ba7d:43343 2024-12-09T18:48:17,925 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T18:48:17,925 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T18:48:17,925 DEBUG [RS:2;a437f8b9ba7d:43343 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-12-09T18:48:17,926 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.HRegionServer(2659): reportForDuty to master=a437f8b9ba7d,41833,1733770097248 with port=43343, startcode=1733770097469 2024-12-09T18:48:17,926 DEBUG [RS:2;a437f8b9ba7d:43343 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T18:48:17,927 DEBUG [RS:0;a437f8b9ba7d:35043 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;a437f8b9ba7d:35043 2024-12-09T18:48:17,927 DEBUG [RS:1;a437f8b9ba7d:42855 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;a437f8b9ba7d:42855 2024-12-09T18:48:17,927 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T18:48:17,927 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-12-09T18:48:17,927 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T18:48:17,927 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-12-09T18:48:17,927 DEBUG [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T18:48:17,927 DEBUG [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(832): About to register with Master. 2024-12-09T18:48:17,928 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(2659): reportForDuty to master=a437f8b9ba7d,41833,1733770097248 with port=35043, startcode=1733770097400 2024-12-09T18:48:17,928 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(2659): reportForDuty to master=a437f8b9ba7d,41833,1733770097248 with port=42855, startcode=1733770097440 2024-12-09T18:48:17,928 DEBUG [RS:0;a437f8b9ba7d:35043 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T18:48:17,928 DEBUG [RS:1;a437f8b9ba7d:42855 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-09T18:48:17,929 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38861, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T18:48:17,929 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41833 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a437f8b9ba7d,43343,1733770097469 2024-12-09T18:48:17,929 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41833 {}] master.ServerManager(517): Registering regionserver=a437f8b9ba7d,43343,1733770097469 2024-12-09T18:48:17,930 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54559, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T18:48:17,930 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48511, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-12-09T18:48:17,931 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41833 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a437f8b9ba7d,35043,1733770097400 2024-12-09T18:48:17,932 DEBUG [RS:2;a437f8b9ba7d:43343 {}] 
regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878 2024-12-09T18:48:17,932 DEBUG [RS:2;a437f8b9ba7d:43343 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40897 2024-12-09T18:48:17,932 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41833 {}] master.ServerManager(517): Registering regionserver=a437f8b9ba7d,35043,1733770097400 2024-12-09T18:48:17,932 DEBUG [RS:2;a437f8b9ba7d:43343 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T18:48:17,934 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41833 {}] master.ServerManager(363): Checking decommissioned status of RegionServer a437f8b9ba7d,42855,1733770097440 2024-12-09T18:48:17,934 DEBUG [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878 2024-12-09T18:48:17,934 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41833 {}] master.ServerManager(517): Registering regionserver=a437f8b9ba7d,42855,1733770097440 2024-12-09T18:48:17,934 DEBUG [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40897 2024-12-09T18:48:17,934 DEBUG [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T18:48:17,936 DEBUG [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878 2024-12-09T18:48:17,937 DEBUG [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:40897 2024-12-09T18:48:17,937 DEBUG [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-12-09T18:48:17,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T18:48:17,968 DEBUG [RS:2;a437f8b9ba7d:43343 {}] zookeeper.ZKUtil(111): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a437f8b9ba7d,43343,1733770097469 2024-12-09T18:48:17,968 WARN [RS:2;a437f8b9ba7d:43343 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T18:48:17,968 INFO [RS:2;a437f8b9ba7d:43343 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T18:48:17,969 DEBUG [RS:0;a437f8b9ba7d:35043 {}] zookeeper.ZKUtil(111): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a437f8b9ba7d,35043,1733770097400 2024-12-09T18:48:17,969 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a437f8b9ba7d,35043,1733770097400] 2024-12-09T18:48:17,969 WARN [RS:0;a437f8b9ba7d:35043 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-09T18:48:17,969 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a437f8b9ba7d,42855,1733770097440] 2024-12-09T18:48:17,969 DEBUG [RS:1;a437f8b9ba7d:42855 {}] zookeeper.ZKUtil(111): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/a437f8b9ba7d,42855,1733770097440 2024-12-09T18:48:17,969 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [a437f8b9ba7d,43343,1733770097469] 2024-12-09T18:48:17,969 INFO [RS:0;a437f8b9ba7d:35043 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T18:48:17,969 DEBUG [RS:2;a437f8b9ba7d:43343 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/WALs/a437f8b9ba7d,43343,1733770097469 2024-12-09T18:48:17,969 WARN [RS:1;a437f8b9ba7d:42855 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-09T18:48:17,969 INFO [RS:1;a437f8b9ba7d:42855 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T18:48:17,969 DEBUG [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/WALs/a437f8b9ba7d,35043,1733770097400 2024-12-09T18:48:17,969 DEBUG [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/WALs/a437f8b9ba7d,42855,1733770097440 2024-12-09T18:48:17,974 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T18:48:17,974 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T18:48:17,974 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-09T18:48:17,978 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T18:48:17,979 INFO [RS:0;a437f8b9ba7d:35043 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T18:48:17,979 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T18:48:17,979 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,981 INFO [RS:1;a437f8b9ba7d:42855 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T18:48:17,981 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
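The RegionServerTracker and ZKUtil entries above show each region server creating an ephemeral znode under /hbase/rs on the quorum at 127.0.0.1:60447. A minimal sketch, assuming the plain ZooKeeper client and a still-running cluster, of how those registrations could be listed from outside:

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServerZNodes {
      public static void main(String[] args) throws Exception {
        // Quorum address taken from the log; the session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60447", 30_000, event -> { });
        // Each live region server registers an ephemeral child under /hbase/rs.
        List<String> servers = zk.getChildren("/hbase/rs", false);
        servers.forEach(System.out::println); // e.g. a437f8b9ba7d,42855,1733770097440
        zk.close();
      }
    }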
2024-12-09T18:48:17,981 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T18:48:17,982 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-09T18:48:17,982 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T18:48:17,983 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,983 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,983 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,983 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,983 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,983 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,983 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=2, maxPoolSize=2 2024-12-09T18:48:17,983 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,984 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,984 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,984 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,984 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,984 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,984 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:17,984 DEBUG [RS:0;a437f8b9ba7d:35043 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:17,987 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker 
runs every PT1S 2024-12-09T18:48:17,987 INFO [RS:2;a437f8b9ba7d:43343 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-09T18:48:17,987 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,988 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-12-09T18:48:17,988 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,988 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T18:48:17,988 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,988 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,988 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,988 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,988 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-12-09T18:48:17,988 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,988 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,35043,1733770097400-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T18:48:17,989 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=2, maxPoolSize=2 2024-12-09T18:48:17,989 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=2, maxPoolSize=2 2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a437f8b9ba7d:0, 
corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,989 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,990 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:17,990 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,990 DEBUG [RS:1;a437f8b9ba7d:42855 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:17,990 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/a437f8b9ba7d:0, corePoolSize=1, maxPoolSize=1 2024-12-09T18:48:17,990 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:17,990 DEBUG [RS:2;a437f8b9ba7d:43343 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0, corePoolSize=3, maxPoolSize=3 2024-12-09T18:48:17,990 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,990 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,990 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,990 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,990 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,990 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,42855,1733770097440-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T18:48:17,990 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,990 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 
2024-12-09T18:48:17,990 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,990 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,991 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:17,991 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,43343,1733770097469-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T18:48:18,002 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T18:48:18,002 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,42855,1733770097440-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,002 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,002 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.Replication(171): a437f8b9ba7d,42855,1733770097440 started 2024-12-09T18:48:18,003 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T18:48:18,004 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,35043,1733770097400-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,004 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,004 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.Replication(171): a437f8b9ba7d,35043,1733770097400 started 2024-12-09T18:48:18,005 WARN [a437f8b9ba7d:41833 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-12-09T18:48:18,014 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
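The ChoreService entries above register the periodic background tasks for each region server (CompactionChecker every 1000 ms, CompactedHFilesCleaner every 120000 ms, and so on). For orientation, a chore is normally a ScheduledChore subclass handed to a ChoreService; the sketch below uses a made-up chore name and period and is not one of the chores in this log.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Hypothetical chore; real ones above include CompactionChecker (period=1000 ms).
        ScheduledChore demo = new ScheduledChore("DemoChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("periodic work");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(demo); // should emit a "Chore ScheduledChore name=DemoChore ..." line
        Thread.sleep(3000);
        service.shutdown();
      }
    }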
2024-12-09T18:48:18,014 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(1482): Serving as a437f8b9ba7d,42855,1733770097440, RpcServer on a437f8b9ba7d/172.17.0.2:42855, sessionid=0x1000c1f4ecb0002 2024-12-09T18:48:18,014 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T18:48:18,014 DEBUG [RS:1;a437f8b9ba7d:42855 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a437f8b9ba7d,42855,1733770097440 2024-12-09T18:48:18,014 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,42855,1733770097440' 2024-12-09T18:48:18,014 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T18:48:18,015 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-09T18:48:18,015 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T18:48:18,015 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,43343,1733770097469-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,015 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,015 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.Replication(171): a437f8b9ba7d,43343,1733770097469 started 2024-12-09T18:48:18,015 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T18:48:18,015 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T18:48:18,015 DEBUG [RS:1;a437f8b9ba7d:42855 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a437f8b9ba7d,42855,1733770097440 2024-12-09T18:48:18,015 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,42855,1733770097440' 2024-12-09T18:48:18,015 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T18:48:18,015 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T18:48:18,016 DEBUG [RS:1;a437f8b9ba7d:42855 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T18:48:18,016 INFO [RS:1;a437f8b9ba7d:42855 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T18:48:18,016 INFO [RS:1;a437f8b9ba7d:42855 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T18:48:18,021 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T18:48:18,021 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(1482): Serving as a437f8b9ba7d,35043,1733770097400, RpcServer on a437f8b9ba7d/172.17.0.2:35043, sessionid=0x1000c1f4ecb0001 2024-12-09T18:48:18,022 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T18:48:18,022 DEBUG [RS:0;a437f8b9ba7d:35043 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a437f8b9ba7d,35043,1733770097400 2024-12-09T18:48:18,022 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,35043,1733770097400' 2024-12-09T18:48:18,022 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T18:48:18,022 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T18:48:18,023 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T18:48:18,023 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T18:48:18,023 DEBUG [RS:0;a437f8b9ba7d:35043 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a437f8b9ba7d,35043,1733770097400 2024-12-09T18:48:18,023 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,35043,1733770097400' 2024-12-09T18:48:18,023 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T18:48:18,023 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T18:48:18,024 DEBUG [RS:0;a437f8b9ba7d:35043 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T18:48:18,024 INFO [RS:0;a437f8b9ba7d:35043 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T18:48:18,024 INFO [RS:0;a437f8b9ba7d:35043 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-09T18:48:18,029 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-12-09T18:48:18,029 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.HRegionServer(1482): Serving as a437f8b9ba7d,43343,1733770097469, RpcServer on a437f8b9ba7d/172.17.0.2:43343, sessionid=0x1000c1f4ecb0003 2024-12-09T18:48:18,029 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-09T18:48:18,029 DEBUG [RS:2;a437f8b9ba7d:43343 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager a437f8b9ba7d,43343,1733770097469 2024-12-09T18:48:18,029 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,43343,1733770097469' 2024-12-09T18:48:18,029 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-09T18:48:18,030 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-09T18:48:18,031 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-09T18:48:18,031 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-09T18:48:18,031 DEBUG [RS:2;a437f8b9ba7d:43343 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager a437f8b9ba7d,43343,1733770097469 2024-12-09T18:48:18,031 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'a437f8b9ba7d,43343,1733770097469' 2024-12-09T18:48:18,031 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-09T18:48:18,031 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-09T18:48:18,031 DEBUG [RS:2;a437f8b9ba7d:43343 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-09T18:48:18,031 INFO [RS:2;a437f8b9ba7d:43343 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-09T18:48:18,032 INFO [RS:2;a437f8b9ba7d:43343 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
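Each region server above finishes startup with "Quota support disabled" for both RPC and space quotas. If quotas were wanted in a similar setup, the usual switch is a single boolean property; the key below is an assumption about the relevant setting, not something stated in this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key: when true, the RPC and space quota managers start instead of
        // logging "Quota support disabled". On a real cluster it belongs in hbase-site.xml.
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println("quota enabled = " + conf.getBoolean("hbase.quota.enabled", false));
      }
    }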
2024-12-09T18:48:18,119 INFO [RS:1;a437f8b9ba7d:42855 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a437f8b9ba7d%2C42855%2C1733770097440, suffix=, logDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/WALs/a437f8b9ba7d,42855,1733770097440, archiveDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/oldWALs, maxLogs=32 2024-12-09T18:48:18,121 INFO [RS:1;a437f8b9ba7d:42855 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a437f8b9ba7d%2C42855%2C1733770097440.1733770098121 2024-12-09T18:48:18,127 INFO [RS:0;a437f8b9ba7d:35043 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a437f8b9ba7d%2C35043%2C1733770097400, suffix=, logDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/WALs/a437f8b9ba7d,35043,1733770097400, archiveDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/oldWALs, maxLogs=32 2024-12-09T18:48:18,130 INFO [RS:0;a437f8b9ba7d:35043 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a437f8b9ba7d%2C35043%2C1733770097400.1733770098129 2024-12-09T18:48:18,131 INFO [RS:1;a437f8b9ba7d:42855 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/WALs/a437f8b9ba7d,42855,1733770097440/a437f8b9ba7d%2C42855%2C1733770097440.1733770098121 2024-12-09T18:48:18,133 DEBUG [RS:1;a437f8b9ba7d:42855 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40165:40165),(127.0.0.1/127.0.0.1:36399:36399),(127.0.0.1/127.0.0.1:39661:39661)] 2024-12-09T18:48:18,135 INFO [RS:2;a437f8b9ba7d:43343 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a437f8b9ba7d%2C43343%2C1733770097469, suffix=, logDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/WALs/a437f8b9ba7d,43343,1733770097469, archiveDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/oldWALs, maxLogs=32 2024-12-09T18:48:18,136 INFO [RS:2;a437f8b9ba7d:43343 {}] monitor.StreamSlowMonitor(122): New stream slow monitor a437f8b9ba7d%2C43343%2C1733770097469.1733770098136 2024-12-09T18:48:18,140 INFO [RS:0;a437f8b9ba7d:35043 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/WALs/a437f8b9ba7d,35043,1733770097400/a437f8b9ba7d%2C35043%2C1733770097400.1733770098129 2024-12-09T18:48:18,141 DEBUG [RS:0;a437f8b9ba7d:35043 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39661:39661),(127.0.0.1/127.0.0.1:36399:36399),(127.0.0.1/127.0.0.1:40165:40165)] 2024-12-09T18:48:18,143 INFO [RS:2;a437f8b9ba7d:43343 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/WALs/a437f8b9ba7d,43343,1733770097469/a437f8b9ba7d%2C43343%2C1733770097469.1733770098136 2024-12-09T18:48:18,144 DEBUG [RS:2;a437f8b9ba7d:43343 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36399:36399),(127.0.0.1/127.0.0.1:40165:40165),(127.0.0.1/127.0.0.1:39661:39661)] 2024-12-09T18:48:18,255 DEBUG [a437f8b9ba7d:41833 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-09T18:48:18,256 DEBUG [a437f8b9ba7d:41833 {}] balancer.BalancerClusterState(204): Hosts are {a437f8b9ba7d=0} racks are {/default-rack=0} 2024-12-09T18:48:18,258 DEBUG [a437f8b9ba7d:41833 {}] 
balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T18:48:18,258 DEBUG [a437f8b9ba7d:41833 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T18:48:18,258 DEBUG [a437f8b9ba7d:41833 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T18:48:18,258 DEBUG [a437f8b9ba7d:41833 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T18:48:18,258 DEBUG [a437f8b9ba7d:41833 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T18:48:18,258 DEBUG [a437f8b9ba7d:41833 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T18:48:18,258 INFO [a437f8b9ba7d:41833 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T18:48:18,258 INFO [a437f8b9ba7d:41833 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T18:48:18,258 INFO [a437f8b9ba7d:41833 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T18:48:18,258 DEBUG [a437f8b9ba7d:41833 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T18:48:18,259 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=a437f8b9ba7d,42855,1733770097440 2024-12-09T18:48:18,261 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a437f8b9ba7d,42855,1733770097440, state=OPENING 2024-12-09T18:48:18,308 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-09T18:48:18,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:18,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:18,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:18,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:18,319 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-09T18:48:18,319 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:18,319 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:18,319 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:18,319 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure 1588230740, server=a437f8b9ba7d,42855,1733770097440}] 2024-12-09T18:48:18,320 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:18,477 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T18:48:18,480 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36549, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T18:48:18,490 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-12-09T18:48:18,490 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-12-09T18:48:18,493 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=a437f8b9ba7d%2C42855%2C1733770097440.meta, suffix=.meta, logDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/WALs/a437f8b9ba7d,42855,1733770097440, archiveDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/oldWALs, maxLogs=32 2024-12-09T18:48:18,494 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor a437f8b9ba7d%2C42855%2C1733770097440.meta.1733770098494.meta 2024-12-09T18:48:18,500 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/WALs/a437f8b9ba7d,42855,1733770097440/a437f8b9ba7d%2C42855%2C1733770097440.meta.1733770098494.meta 2024-12-09T18:48:18,505 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39661:39661),(127.0.0.1/127.0.0.1:36399:36399),(127.0.0.1/127.0.0.1:40165:40165)] 2024-12-09T18:48:18,506 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-09T18:48:18,507 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-09T18:48:18,507 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-09T18:48:18,507 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
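The AbstractFSWAL lines above (the three per-server WALs and the hbase:meta WAL) all report blocksize=256 MB, rollsize=128 MB, maxLogs=32 under the FSHLogProvider. A hedged sketch of where those numbers are usually configured follows; the property names and defaults are assumptions, not values read from this build.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigPeek {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed keys behind "blocksize=256 MB, rollsize=128 MB, maxLogs=32".
        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        float rollMultiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        int maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);
        System.out.printf("blocksize=%d rollsize=%d maxLogs=%d%n",
            blockSize, (long) (blockSize * rollMultiplier), maxLogs);
      }
    }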
2024-12-09T18:48:18,507 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-09T18:48:18,507 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:18,507 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-12-09T18:48:18,507 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-12-09T18:48:18,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-09T18:48:18,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-09T18:48:18,511 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:18,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:18,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-12-09T18:48:18,512 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-12-09T18:48:18,512 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:18,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:18,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-09T18:48:18,514 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-09T18:48:18,514 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:18,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-09T18:48:18,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-09T18:48:18,516 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-09T18:48:18,516 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:18,516 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-12-09T18:48:18,517 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-12-09T18:48:18,517 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740 2024-12-09T18:48:18,519 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740 2024-12-09T18:48:18,520 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-12-09T18:48:18,520 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-12-09T18:48:18,521 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-12-09T18:48:18,522 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-12-09T18:48:18,523 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63613559, jitterRate=-0.05208410322666168}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-09T18:48:18,523 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-12-09T18:48:18,524 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1733770098507Writing region info on filesystem at 1733770098507Initializing all the Stores at 1733770098509 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770098509Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770098509Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770098509Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1733770098509Cleaning up temporary data from old regions at 1733770098520 (+11 ms)Running coprocessor post-open hooks at 1733770098523 (+3 ms)Region opened successfully at 1733770098524 (+1 ms) 2024-12-09T18:48:18,525 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733770098476 2024-12-09T18:48:18,529 DEBUG [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-09T18:48:18,529 INFO [RS_OPEN_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-12-09T18:48:18,530 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=a437f8b9ba7d,42855,1733770097440 2024-12-09T18:48:18,531 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as a437f8b9ba7d,42855,1733770097440, state=OPEN 2024-12-09T18:48:18,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T18:48:18,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T18:48:18,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T18:48:18,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-09T18:48:18,542 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=a437f8b9ba7d,42855,1733770097440 2024-12-09T18:48:18,542 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:18,542 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:18,542 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:18,542 DEBUG [zk-event-processor-pool-0 {}] 
hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-09T18:48:18,547 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-12-09T18:48:18,547 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=a437f8b9ba7d,42855,1733770097440 in 223 msec 2024-12-09T18:48:18,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-12-09T18:48:18,553 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 695 msec 2024-12-09T18:48:18,554 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-12-09T18:48:18,554 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-12-09T18:48:18,556 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T18:48:18,556 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a437f8b9ba7d,42855,1733770097440, seqNum=-1] 2024-12-09T18:48:18,556 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T18:48:18,558 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55887, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T18:48:18,566 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 760 msec 2024-12-09T18:48:18,567 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733770098566, completionTime=-1 2024-12-09T18:48:18,567 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-09T18:48:18,567 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 
2024-12-09T18:48:18,569 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-12-09T18:48:18,569 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733770158569 2024-12-09T18:48:18,569 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733770218569 2024-12-09T18:48:18,569 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-12-09T18:48:18,570 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-09T18:48:18,570 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,41833,1733770097248-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,570 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,41833,1733770097248-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,570 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,41833,1733770097248-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,571 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-a437f8b9ba7d:41833, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,571 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,571 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,574 DEBUG [master/a437f8b9ba7d:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-12-09T18:48:18,576 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.051sec 2024-12-09T18:48:18,576 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-09T18:48:18,576 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-09T18:48:18,576 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-09T18:48:18,576 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-09T18:48:18,576 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-09T18:48:18,576 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,41833,1733770097248-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-09T18:48:18,577 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,41833,1733770097248-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-09T18:48:18,579 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-12-09T18:48:18,579 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-09T18:48:18,579 INFO [master/a437f8b9ba7d:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=a437f8b9ba7d,41833,1733770097248-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-09T18:48:18,592 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ef9cf8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T18:48:18,592 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request a437f8b9ba7d,41833,-1 for getting cluster id 2024-12-09T18:48:18,592 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-12-09T18:48:18,593 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '14f869c9-8304-4845-b6a5-76c946263a5d' 2024-12-09T18:48:18,594 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-12-09T18:48:18,594 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "14f869c9-8304-4845-b6a5-76c946263a5d" 2024-12-09T18:48:18,595 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48591898, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T18:48:18,595 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [a437f8b9ba7d,41833,-1] 2024-12-09T18:48:18,595 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-12-09T18:48:18,595 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:18,597 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57132, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-12-09T18:48:18,598 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@98aa2ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-09T18:48:18,599 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-12-09T18:48:18,600 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=a437f8b9ba7d,42855,1733770097440, seqNum=-1] 2024-12-09T18:48:18,601 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T18:48:18,603 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54856, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T18:48:18,606 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=a437f8b9ba7d,41833,1733770097248 2024-12-09T18:48:18,607 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-12-09T18:48:18,608 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncConnectionImpl(321): The fetched master address is a437f8b9ba7d,41833,1733770097248 2024-12-09T18:48:18,608 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@70287e38 2024-12-09T18:48:18,609 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-09T18:48:18,611 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57148, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-09T18:48:18,612 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-09T18:48:18,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-12-09T18:48:18,617 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-12-09T18:48:18,617 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:18,617 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-12-09T18:48:18,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T18:48:18,619 INFO [PEWorker-3 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-09T18:48:18,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741837_1013 (size=392) 2024-12-09T18:48:18,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741837_1013 (size=392) 2024-12-09T18:48:18,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741837_1013 (size=392) 2024-12-09T18:48:18,630 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 823824f2f6b5769910ca732509af06f4, NAME => 'TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878 2024-12-09T18:48:18,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741838_1014 (size=51) 2024-12-09T18:48:18,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741838_1014 (size=51) 2024-12-09T18:48:18,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741838_1014 (size=51) 2024-12-09T18:48:18,639 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:18,639 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 823824f2f6b5769910ca732509af06f4, disabling compactions & flushes 2024-12-09T18:48:18,639 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 2024-12-09T18:48:18,639 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 2024-12-09T18:48:18,639 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. after waiting 0 ms 2024-12-09T18:48:18,639 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 2024-12-09T18:48:18,640 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 
2024-12-09T18:48:18,640 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 823824f2f6b5769910ca732509af06f4: Waiting for close lock at 1733770098639Disabling compacts and flushes for region at 1733770098639Disabling writes for close at 1733770098639Writing region close event to WAL at 1733770098640 (+1 ms)Closed at 1733770098640 2024-12-09T18:48:18,641 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-12-09T18:48:18,642 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1733770098641"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733770098641"}]},"ts":"1733770098641"} 2024-12-09T18:48:18,644 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-12-09T18:48:18,646 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-09T18:48:18,646 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733770098646"}]},"ts":"1733770098646"} 2024-12-09T18:48:18,649 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-12-09T18:48:18,649 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {a437f8b9ba7d=0} racks are {/default-rack=0} 2024-12-09T18:48:18,650 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-12-09T18:48:18,650 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-12-09T18:48:18,650 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-12-09T18:48:18,650 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-12-09T18:48:18,650 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-12-09T18:48:18,650 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-12-09T18:48:18,650 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-12-09T18:48:18,650 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-12-09T18:48:18,650 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-12-09T18:48:18,650 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-12-09T18:48:18,650 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=823824f2f6b5769910ca732509af06f4, ASSIGN}] 2024-12-09T18:48:18,652 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=823824f2f6b5769910ca732509af06f4, ASSIGN 2024-12-09T18:48:18,654 INFO [PEWorker-4 {}] 
assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=823824f2f6b5769910ca732509af06f4, ASSIGN; state=OFFLINE, location=a437f8b9ba7d,35043,1733770097400; forceNewPlan=false, retain=false 2024-12-09T18:48:18,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T18:48:18,739 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-12-09T18:48:18,747 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T18:48:18,788 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T18:48:18,789 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-12-09T18:48:18,804 INFO [a437f8b9ba7d:41833 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-09T18:48:18,804 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=823824f2f6b5769910ca732509af06f4, regionState=OPENING, regionLocation=a437f8b9ba7d,35043,1733770097400 2024-12-09T18:48:18,808 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=823824f2f6b5769910ca732509af06f4, ASSIGN because future has completed 2024-12-09T18:48:18,808 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 823824f2f6b5769910ca732509af06f4, server=a437f8b9ba7d,35043,1733770097400}] 2024-12-09T18:48:18,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T18:48:18,963 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-09T18:48:18,965 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43427, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-09T18:48:18,971 INFO [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 
2024-12-09T18:48:18,971 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 823824f2f6b5769910ca732509af06f4, NAME => 'TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4.', STARTKEY => '', ENDKEY => ''} 2024-12-09T18:48:18,972 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:18,972 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-09T18:48:18,972 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:18,972 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:18,974 INFO [StoreOpener-823824f2f6b5769910ca732509af06f4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:18,976 INFO [StoreOpener-823824f2f6b5769910ca732509af06f4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 823824f2f6b5769910ca732509af06f4 columnFamilyName cf 2024-12-09T18:48:18,976 DEBUG [StoreOpener-823824f2f6b5769910ca732509af06f4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-09T18:48:18,977 INFO [StoreOpener-823824f2f6b5769910ca732509af06f4-1 {}] regionserver.HStore(327): Store=823824f2f6b5769910ca732509af06f4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-09T18:48:18,977 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:18,978 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/default/TestHBaseWalOnEC/823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:18,979 DEBUG 
[RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/default/TestHBaseWalOnEC/823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:18,979 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:18,979 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:18,982 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:18,986 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/default/TestHBaseWalOnEC/823824f2f6b5769910ca732509af06f4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-09T18:48:18,986 INFO [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 823824f2f6b5769910ca732509af06f4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71312866, jitterRate=0.06264451146125793}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-09T18:48:18,986 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:18,987 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 823824f2f6b5769910ca732509af06f4: Running coprocessor pre-open hook at 1733770098972Writing region info on filesystem at 1733770098972Initializing all the Stores at 1733770098974 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1733770098974Cleaning up temporary data from old regions at 1733770098979 (+5 ms)Running coprocessor post-open hooks at 1733770098986 (+7 ms)Region opened successfully at 1733770098987 (+1 ms) 2024-12-09T18:48:18,988 INFO [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4., pid=6, masterSystemTime=1733770098962 2024-12-09T18:48:18,991 DEBUG [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 2024-12-09T18:48:18,991 INFO [RS_OPEN_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 
2024-12-09T18:48:18,993 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=823824f2f6b5769910ca732509af06f4, regionState=OPEN, openSeqNum=2, regionLocation=a437f8b9ba7d,35043,1733770097400 2024-12-09T18:48:18,996 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 823824f2f6b5769910ca732509af06f4, server=a437f8b9ba7d,35043,1733770097400 because future has completed 2024-12-09T18:48:19,002 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-12-09T18:48:19,002 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 823824f2f6b5769910ca732509af06f4, server=a437f8b9ba7d,35043,1733770097400 in 189 msec 2024-12-09T18:48:19,005 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-12-09T18:48:19,006 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=823824f2f6b5769910ca732509af06f4, ASSIGN in 352 msec 2024-12-09T18:48:19,007 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-09T18:48:19,007 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733770099007"}]},"ts":"1733770099007"} 2024-12-09T18:48:19,010 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-12-09T18:48:19,012 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-12-09T18:48:19,015 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 400 msec 2024-12-09T18:48:19,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-12-09T18:48:19,247 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T18:48:19,247 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-12-09T18:48:19,248 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T18:48:19,255 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-12-09T18:48:19,256 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-09T18:48:19,256 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
2024-12-09T18:48:19,260 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4., hostname=a437f8b9ba7d,35043,1733770097400, seqNum=2] 2024-12-09T18:48:19,260 DEBUG [RPCClient-NioEventLoopGroup-6-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-09T18:48:19,262 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32824, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-09T18:48:19,265 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-12-09T18:48:19,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-12-09T18:48:19,268 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-12-09T18:48:19,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T18:48:19,270 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-12-09T18:48:19,270 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-12-09T18:48:19,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T18:48:19,425 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35043 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-12-09T18:48:19,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 
2024-12-09T18:48:19,426 INFO [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 823824f2f6b5769910ca732509af06f4 1/1 column families, dataSize=32 B heapSize=360 B 2024-12-09T18:48:19,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/default/TestHBaseWalOnEC/823824f2f6b5769910ca732509af06f4/.tmp/cf/33b86e6d22aa483a862ac5a79e8a0c1b is 36, key is row/cf:cq/1733770099263/Put/seqid=0 2024-12-09T18:48:19,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741839_1015 (size=4787) 2024-12-09T18:48:19,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741839_1015 (size=4787) 2024-12-09T18:48:19,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741839_1015 (size=4787) 2024-12-09T18:48:19,448 INFO [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/default/TestHBaseWalOnEC/823824f2f6b5769910ca732509af06f4/.tmp/cf/33b86e6d22aa483a862ac5a79e8a0c1b 2024-12-09T18:48:19,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/default/TestHBaseWalOnEC/823824f2f6b5769910ca732509af06f4/.tmp/cf/33b86e6d22aa483a862ac5a79e8a0c1b as hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/default/TestHBaseWalOnEC/823824f2f6b5769910ca732509af06f4/cf/33b86e6d22aa483a862ac5a79e8a0c1b 2024-12-09T18:48:19,465 INFO [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/default/TestHBaseWalOnEC/823824f2f6b5769910ca732509af06f4/cf/33b86e6d22aa483a862ac5a79e8a0c1b, entries=1, sequenceid=5, filesize=4.7 K 2024-12-09T18:48:19,466 INFO [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 823824f2f6b5769910ca732509af06f4 in 40ms, sequenceid=5, compaction requested=false 2024-12-09T18:48:19,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 823824f2f6b5769910ca732509af06f4: 2024-12-09T18:48:19,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 
2024-12-09T18:48:19,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/a437f8b9ba7d:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-12-09T18:48:19,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-12-09T18:48:19,473 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-12-09T18:48:19,473 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 199 msec 2024-12-09T18:48:19,477 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 209 msec 2024-12-09T18:48:19,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41833 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-12-09T18:48:19,585 INFO [RPCClient-NioEventLoopGroup-6-9 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-12-09T18:48:19,591 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-12-09T18:48:19,591 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T18:48:19,591 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T18:48:19,591 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:19,592 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:19,592 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-12-09T18:48:19,592 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-09T18:48:19,592 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1536877160, stopped=false 2024-12-09T18:48:19,592 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=a437f8b9ba7d,41833,1733770097248 2024-12-09T18:48:19,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:19,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:19,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:19,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-09T18:48:19,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:19,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, 
quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:19,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:19,600 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:19,600 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T18:48:19,600 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-12-09T18:48:19,601 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T18:48:19,601 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:19,601 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:19,601 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a437f8b9ba7d,35043,1733770097400' ***** 2024-12-09T18:48:19,601 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T18:48:19,601 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a437f8b9ba7d,42855,1733770097440' ***** 2024-12-09T18:48:19,601 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T18:48:19,601 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:19,601 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'a437f8b9ba7d,43343,1733770097469' ***** 2024-12-09T18:48:19,601 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-12-09T18:48:19,601 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:19,601 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T18:48:19,601 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-09T18:48:19,602 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T18:48:19,602 INFO [RS:1;a437f8b9ba7d:42855 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T18:48:19,602 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T18:48:19,602 INFO [RS:1;a437f8b9ba7d:42855 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-09T18:48:19,602 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(959): stopping server a437f8b9ba7d,42855,1733770097440 2024-12-09T18:48:19,602 INFO [RS:0;a437f8b9ba7d:35043 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T18:48:19,602 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T18:48:19,602 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-09T18:48:19,602 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T18:48:19,602 INFO [RS:1;a437f8b9ba7d:42855 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;a437f8b9ba7d:42855. 2024-12-09T18:48:19,602 INFO [RS:0;a437f8b9ba7d:35043 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T18:48:19,602 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(3091): Received CLOSE for 823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:19,602 INFO [RS:2;a437f8b9ba7d:43343 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-09T18:48:19,602 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-12-09T18:48:19,603 INFO [RS:2;a437f8b9ba7d:43343 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-09T18:48:19,603 DEBUG [RS:1;a437f8b9ba7d:42855 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T18:48:19,603 DEBUG [RS:1;a437f8b9ba7d:42855 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:19,603 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.HRegionServer(959): stopping server a437f8b9ba7d,43343,1733770097469 2024-12-09T18:48:19,603 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(959): stopping server a437f8b9ba7d,35043,1733770097400 2024-12-09T18:48:19,603 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T18:48:19,603 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.HBaseServerBase(455): Close async 
cluster connection 2024-12-09T18:48:19,603 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T18:48:19,603 INFO [RS:2;a437f8b9ba7d:43343 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;a437f8b9ba7d:43343. 2024-12-09T18:48:19,603 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T18:48:19,603 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T18:48:19,603 INFO [RS:0;a437f8b9ba7d:35043 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;a437f8b9ba7d:35043. 2024-12-09T18:48:19,603 DEBUG [RS:2;a437f8b9ba7d:43343 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T18:48:19,603 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-12-09T18:48:19,603 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 823824f2f6b5769910ca732509af06f4, disabling compactions & flushes 2024-12-09T18:48:19,603 DEBUG [RS:2;a437f8b9ba7d:43343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:19,603 DEBUG [RS:0;a437f8b9ba7d:35043 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at 
java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-12-09T18:48:19,603 INFO [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 2024-12-09T18:48:19,603 DEBUG [RS:0;a437f8b9ba7d:35043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:19,603 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.HRegionServer(976): stopping server a437f8b9ba7d,43343,1733770097469; all regions closed. 2024-12-09T18:48:19,603 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 2024-12-09T18:48:19,603 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T18:48:19,603 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. after waiting 0 ms 2024-12-09T18:48:19,603 DEBUG [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(1325): Online Regions={823824f2f6b5769910ca732509af06f4=TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4.} 2024-12-09T18:48:19,603 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 
2024-12-09T18:48:19,603 DEBUG [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(1351): Waiting on 823824f2f6b5769910ca732509af06f4 2024-12-09T18:48:19,603 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-12-09T18:48:19,603 DEBUG [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-12-09T18:48:19,604 DEBUG [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-12-09T18:48:19,604 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-12-09T18:48:19,604 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-12-09T18:48:19,604 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-12-09T18:48:19,604 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,604 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-09T18:48:19,604 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,604 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-12-09T18:48:19,604 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,604 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,604 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,604 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-12-09T18:48:19,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741835_1011 (size=93) 2024-12-09T18:48:19,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741835_1011 (size=93) 2024-12-09T18:48:19,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741835_1011 (size=93) 2024-12-09T18:48:19,612 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/default/TestHBaseWalOnEC/823824f2f6b5769910ca732509af06f4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-09T18:48:19,613 DEBUG [RS:2;a437f8b9ba7d:43343 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/oldWALs 2024-12-09T18:48:19,613 INFO [RS:2;a437f8b9ba7d:43343 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a437f8b9ba7d%2C43343%2C1733770097469:(num 1733770098136) 2024-12-09T18:48:19,613 DEBUG [RS:2;a437f8b9ba7d:43343 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:19,613 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:19,613 INFO [RS:2;a437f8b9ba7d:43343 {}] 
hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T18:48:19,613 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.ChoreService(370): Chore service for: regionserver/a437f8b9ba7d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T18:48:19,613 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T18:48:19,613 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T18:48:19,613 INFO [regionserver/a437f8b9ba7d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T18:48:19,613 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T18:48:19,613 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T18:48:19,613 INFO [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 2024-12-09T18:48:19,614 INFO [RS:2;a437f8b9ba7d:43343 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43343 2024-12-09T18:48:19,614 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 823824f2f6b5769910ca732509af06f4: Waiting for close lock at 1733770099603Running coprocessor pre-close hooks at 1733770099603Disabling compacts and flushes for region at 1733770099603Disabling writes for close at 1733770099603Writing region close event to WAL at 1733770099606 (+3 ms)Running coprocessor post-close hooks at 1733770099613 (+7 ms)Closed at 1733770099613 2024-12-09T18:48:19,614 DEBUG [RS_CLOSE_REGION-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4. 
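[Editor's note] The "Region close journal" records above list each close step with an absolute epoch-millisecond timestamp and a "(+N ms)" delta from the previous step, with the entries run together back to back. A minimal, hypothetical Java sketch of that journaling style (this is not the HRegion implementation, only an illustration of how those deltas are derived and printed):

import java.util.ArrayList;
import java.util.List;

// Hypothetical step journal (not HBase code): records (label, timestamp) pairs and
// renders them as "label at T" with a "(+delta ms)" suffix when time has advanced
// since the previous step. Entries are concatenated back to back, as in the log.
final class StepJournal {
  private final List<String> labels = new ArrayList<>();
  private final List<Long> times = new ArrayList<>();

  void step(String label) {
    labels.add(label);
    times.add(System.currentTimeMillis());
  }

  String render() {
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < labels.size(); i++) {
      sb.append(labels.get(i)).append(" at ").append(times.get(i));
      long delta = i == 0 ? 0 : times.get(i) - times.get(i - 1);
      if (delta > 0) {
        sb.append(" (+").append(delta).append(" ms)");
      }
    }
    return sb.toString();
  }

  public static void main(String[] args) throws InterruptedException {
    StepJournal j = new StepJournal();
    j.step("Waiting for close lock");
    j.step("Disabling writes for close");
    Thread.sleep(5);
    j.step("Closed");
    System.out.println(j.render());
  }
}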
2024-12-09T18:48:19,623 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/.tmp/info/3413cf802933484186f3a6210adc339b is 153, key is TestHBaseWalOnEC,,1733770098612.823824f2f6b5769910ca732509af06f4./info:regioninfo/1733770098992/Put/seqid=0 2024-12-09T18:48:19,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a437f8b9ba7d,43343,1733770097469 2024-12-09T18:48:19,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T18:48:19,624 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T18:48:19,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741840_1016 (size=6637) 2024-12-09T18:48:19,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741840_1016 (size=6637) 2024-12-09T18:48:19,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741840_1016 (size=6637) 2024-12-09T18:48:19,631 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/.tmp/info/3413cf802933484186f3a6210adc339b 2024-12-09T18:48:19,633 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a437f8b9ba7d,43343,1733770097469] 2024-12-09T18:48:19,641 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a437f8b9ba7d,43343,1733770097469 already deleted, retry=false 2024-12-09T18:48:19,642 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a437f8b9ba7d,43343,1733770097469 expired; onlineServers=2 2024-12-09T18:48:19,652 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/.tmp/ns/9145cc397692423997c307a5781a3ad0 is 43, key is default/ns:d/1733770098559/Put/seqid=0 2024-12-09T18:48:19,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741841_1017 (size=5153) 2024-12-09T18:48:19,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741841_1017 (size=5153) 2024-12-09T18:48:19,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741841_1017 (size=5153) 2024-12-09T18:48:19,659 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/.tmp/ns/9145cc397692423997c307a5781a3ad0 2024-12-09T18:48:19,681 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/.tmp/table/66a1fe251b374ad2b442e9d4ff6fff0a is 52, key is TestHBaseWalOnEC/table:state/1733770099007/Put/seqid=0 2024-12-09T18:48:19,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741842_1018 (size=5249) 2024-12-09T18:48:19,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741842_1018 (size=5249) 2024-12-09T18:48:19,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741842_1018 (size=5249) 2024-12-09T18:48:19,688 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/.tmp/table/66a1fe251b374ad2b442e9d4ff6fff0a 2024-12-09T18:48:19,690 INFO [regionserver/a437f8b9ba7d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:19,693 INFO [regionserver/a437f8b9ba7d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:19,698 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/.tmp/info/3413cf802933484186f3a6210adc339b as hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/info/3413cf802933484186f3a6210adc339b 2024-12-09T18:48:19,698 INFO [regionserver/a437f8b9ba7d:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:19,708 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/info/3413cf802933484186f3a6210adc339b, entries=10, sequenceid=11, filesize=6.5 K 2024-12-09T18:48:19,709 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/.tmp/ns/9145cc397692423997c307a5781a3ad0 as hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/ns/9145cc397692423997c307a5781a3ad0 2024-12-09T18:48:19,719 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/ns/9145cc397692423997c307a5781a3ad0, entries=2, sequenceid=11, filesize=5.0 K 2024-12-09T18:48:19,721 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/.tmp/table/66a1fe251b374ad2b442e9d4ff6fff0a as hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/table/66a1fe251b374ad2b442e9d4ff6fff0a 2024-12-09T18:48:19,729 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/table/66a1fe251b374ad2b442e9d4ff6fff0a, entries=2, sequenceid=11, filesize=5.1 K 2024-12-09T18:48:19,731 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 127ms, sequenceid=11, compaction requested=false 2024-12-09T18:48:19,733 INFO [RS:2;a437f8b9ba7d:43343 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T18:48:19,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:19,733 INFO [RS:2;a437f8b9ba7d:43343 {}] regionserver.HRegionServer(1031): Exiting; stopping=a437f8b9ba7d,43343,1733770097469; zookeeper connection closed. 2024-12-09T18:48:19,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43343-0x1000c1f4ecb0003, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:19,734 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a17fc32 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a17fc32 2024-12-09T18:48:19,737 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-12-09T18:48:19,738 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-09T18:48:19,738 INFO [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-12-09T18:48:19,738 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1733770099603Running coprocessor pre-close hooks at 1733770099603Disabling compacts and flushes for region at 1733770099604 (+1 ms)Disabling writes for close at 1733770099604Obtaining lock to block concurrent updates at 1733770099604Preparing flush snapshotting stores in 1588230740 at 1733770099604Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1733770099605 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1733770099606 (+1 ms)Flushing 1588230740/info: creating writer at 1733770099606Flushing 1588230740/info: appending metadata at 1733770099623 (+17 ms)Flushing 1588230740/info: closing 
flushed file at 1733770099623Flushing 1588230740/ns: creating writer at 1733770099638 (+15 ms)Flushing 1588230740/ns: appending metadata at 1733770099652 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1733770099652Flushing 1588230740/table: creating writer at 1733770099665 (+13 ms)Flushing 1588230740/table: appending metadata at 1733770099680 (+15 ms)Flushing 1588230740/table: closing flushed file at 1733770099680Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5fa9930d: reopening flushed file at 1733770099696 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7f2a8c8c: reopening flushed file at 1733770099708 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45edfa74: reopening flushed file at 1733770099719 (+11 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 127ms, sequenceid=11, compaction requested=false at 1733770099731 (+12 ms)Writing region close event to WAL at 1733770099732 (+1 ms)Running coprocessor post-close hooks at 1733770099738 (+6 ms)Closed at 1733770099738 2024-12-09T18:48:19,739 DEBUG [RS_CLOSE_META-regionserver/a437f8b9ba7d:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-09T18:48:19,803 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(976): stopping server a437f8b9ba7d,35043,1733770097400; all regions closed. 2024-12-09T18:48:19,804 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(976): stopping server a437f8b9ba7d,42855,1733770097440; all regions closed. 2024-12-09T18:48:19,804 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,804 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,804 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,804 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,804 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,804 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,805 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,805 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,805 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,805 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:19,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741834_1010 (size=1298) 2024-12-09T18:48:19,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741834_1010 (size=1298) 2024-12-09T18:48:19,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741834_1010 (size=1298) 2024-12-09T18:48:19,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741836_1012 (size=2751) 2024-12-09T18:48:19,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741836_1012 (size=2751) 2024-12-09T18:48:19,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741836_1012 (size=2751) 2024-12-09T18:48:19,810 DEBUG [RS:0;a437f8b9ba7d:35043 {}] 
wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/oldWALs 2024-12-09T18:48:19,810 INFO [RS:0;a437f8b9ba7d:35043 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a437f8b9ba7d%2C35043%2C1733770097400:(num 1733770098129) 2024-12-09T18:48:19,810 DEBUG [RS:0;a437f8b9ba7d:35043 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:19,810 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:19,810 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T18:48:19,811 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.ChoreService(370): Chore service for: regionserver/a437f8b9ba7d:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T18:48:19,811 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-09T18:48:19,811 INFO [regionserver/a437f8b9ba7d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-12-09T18:48:19,811 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-09T18:48:19,811 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-09T18:48:19,811 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T18:48:19,811 INFO [RS:0;a437f8b9ba7d:35043 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35043 2024-12-09T18:48:19,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a437f8b9ba7d,35043,1733770097400 2024-12-09T18:48:19,816 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T18:48:19,816 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T18:48:19,825 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a437f8b9ba7d,35043,1733770097400] 2024-12-09T18:48:19,833 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a437f8b9ba7d,35043,1733770097400 already deleted, retry=false 2024-12-09T18:48:19,833 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a437f8b9ba7d,35043,1733770097400 expired; onlineServers=1 2024-12-09T18:48:19,925 INFO [RS:0;a437f8b9ba7d:35043 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T18:48:19,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:19,925 INFO [RS:0;a437f8b9ba7d:35043 {}] regionserver.HRegionServer(1031): Exiting; stopping=a437f8b9ba7d,35043,1733770097400; zookeeper connection closed. 
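[Editor's note] The NodeDeleted events on /hbase/rs/<server> above, followed by "RegionServer ephemeral node deleted, processing expiration", show the ephemeral-znode liveness pattern: each region server registers an ephemeral node, and the master reacts when ZooKeeper removes it. A self-contained sketch of that pattern with the plain ZooKeeper client (quorum address and znode path are made up; this is not the HBase RegionServerTracker):

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

// Sketch of the ephemeral-znode liveness pattern: a "server" registers an ephemeral
// node, an "observer" watches it, and the watch fires when the server's session ends.
public class EphemeralLivenessSketch {

  private static ZooKeeper connect(String quorum) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper(quorum, 15000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();
    return zk;
  }

  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:2181";     // assumed local ZooKeeper
    String znode = "/demo-rs-server-1";   // hypothetical path directly under /

    ZooKeeper server = connect(quorum);
    ZooKeeper observer = connect(quorum);
    CountDownLatch deleted = new CountDownLatch(1);

    // "Region server" side: register liveness as an ephemeral node.
    server.create(znode, new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // "Master" side: set a one-shot watch; it fires when the node disappears.
    observer.exists(znode, event -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("Ephemeral node deleted: " + event.getPath());
        deleted.countDown();
      }
    });

    // Closing the server's session makes ZooKeeper delete the ephemeral node.
    server.close();
    deleted.await();
    observer.close();
  }
}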
2024-12-09T18:48:19,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35043-0x1000c1f4ecb0001, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:19,926 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@372ac0df {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@372ac0df 2024-12-09T18:48:20,004 INFO [regionserver/a437f8b9ba7d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-12-09T18:48:20,004 INFO [regionserver/a437f8b9ba7d:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-12-09T18:48:20,215 DEBUG [RS:1;a437f8b9ba7d:42855 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/oldWALs 2024-12-09T18:48:20,215 INFO [RS:1;a437f8b9ba7d:42855 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a437f8b9ba7d%2C42855%2C1733770097440.meta:.meta(num 1733770098494) 2024-12-09T18:48:20,216 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:20,216 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:20,217 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:20,217 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:20,218 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:20,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741833_1009 (size=93) 2024-12-09T18:48:20,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741833_1009 (size=93) 2024-12-09T18:48:20,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741833_1009 (size=93) 2024-12-09T18:48:20,225 DEBUG [RS:1;a437f8b9ba7d:42855 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/oldWALs 2024-12-09T18:48:20,225 INFO [RS:1;a437f8b9ba7d:42855 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog a437f8b9ba7d%2C42855%2C1733770097440:(num 1733770098121) 2024-12-09T18:48:20,225 DEBUG [RS:1;a437f8b9ba7d:42855 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-09T18:48:20,225 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.LeaseManager(133): Closed leases 2024-12-09T18:48:20,225 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T18:48:20,225 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.ChoreService(370): Chore service for: regionserver/a437f8b9ba7d:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-12-09T18:48:20,226 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T18:48:20,226 INFO [regionserver/a437f8b9ba7d:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T18:48:20,226 INFO [RS:1;a437f8b9ba7d:42855 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42855 2024-12-09T18:48:20,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-09T18:48:20,233 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T18:48:20,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/a437f8b9ba7d,42855,1733770097440 2024-12-09T18:48:20,241 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [a437f8b9ba7d,42855,1733770097440] 2024-12-09T18:48:20,250 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/a437f8b9ba7d,42855,1733770097440 already deleted, retry=false 2024-12-09T18:48:20,250 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; a437f8b9ba7d,42855,1733770097440 expired; onlineServers=0 2024-12-09T18:48:20,250 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'a437f8b9ba7d,41833,1733770097248' ***** 2024-12-09T18:48:20,250 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-09T18:48:20,250 INFO [M:0;a437f8b9ba7d:41833 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-12-09T18:48:20,250 INFO [M:0;a437f8b9ba7d:41833 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-12-09T18:48:20,250 DEBUG [M:0;a437f8b9ba7d:41833 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-09T18:48:20,250 DEBUG [M:0;a437f8b9ba7d:41833 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-09T18:48:20,250 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.small.0-1733770097811 {}] cleaner.HFileCleaner(306): Exit Thread[master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.small.0-1733770097811,5,FailOnTimeoutGroup] 2024-12-09T18:48:20,250 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-12-09T18:48:20,250 DEBUG [master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.large.0-1733770097811 {}] cleaner.HFileCleaner(306): Exit Thread[master/a437f8b9ba7d:0:becomeActiveMaster-HFileCleaner.large.0-1733770097811,5,FailOnTimeoutGroup] 2024-12-09T18:48:20,250 INFO [M:0;a437f8b9ba7d:41833 {}] hbase.ChoreService(370): Chore service for: master/a437f8b9ba7d:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-12-09T18:48:20,251 INFO [M:0;a437f8b9ba7d:41833 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-12-09T18:48:20,251 DEBUG [M:0;a437f8b9ba7d:41833 {}] master.HMaster(1795): Stopping service threads 2024-12-09T18:48:20,251 INFO [M:0;a437f8b9ba7d:41833 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-09T18:48:20,251 INFO [M:0;a437f8b9ba7d:41833 {}] procedure2.ProcedureExecutor(723): Stopping 2024-12-09T18:48:20,251 INFO [M:0;a437f8b9ba7d:41833 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-09T18:48:20,251 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-09T18:48:20,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-09T18:48:20,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-09T18:48:20,324 DEBUG [M:0;a437f8b9ba7d:41833 {}] zookeeper.ZKUtil(347): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-09T18:48:20,324 WARN [M:0;a437f8b9ba7d:41833 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-09T18:48:20,326 INFO [M:0;a437f8b9ba7d:41833 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/.lastflushedseqids 2024-12-09T18:48:20,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741843_1019 (size=127) 2024-12-09T18:48:20,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:20,342 INFO [RS:1;a437f8b9ba7d:42855 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T18:48:20,342 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42855-0x1000c1f4ecb0002, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:20,342 INFO [RS:1;a437f8b9ba7d:42855 {}] regionserver.HRegionServer(1031): Exiting; stopping=a437f8b9ba7d,42855,1733770097440; zookeeper connection closed. 
2024-12-09T18:48:20,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741843_1019 (size=127) 2024-12-09T18:48:20,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741843_1019 (size=127) 2024-12-09T18:48:20,342 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@26e7bc24 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@26e7bc24 2024-12-09T18:48:20,342 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-09T18:48:20,343 INFO [M:0;a437f8b9ba7d:41833 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-12-09T18:48:20,343 INFO [M:0;a437f8b9ba7d:41833 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-09T18:48:20,343 DEBUG [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-09T18:48:20,343 INFO [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T18:48:20,343 DEBUG [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-09T18:48:20,343 DEBUG [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-09T18:48:20,343 DEBUG [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-09T18:48:20,343 INFO [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.82 KB heapSize=34.11 KB 2024-12-09T18:48:20,359 DEBUG [M:0;a437f8b9ba7d:41833 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c18d9f539c804760b30d89f4c53e3a33 is 82, key is hbase:meta,,1/info:regioninfo/1733770098529/Put/seqid=0 2024-12-09T18:48:20,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741844_1020 (size=5672) 2024-12-09T18:48:20,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741844_1020 (size=5672) 2024-12-09T18:48:20,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741844_1020 (size=5672) 2024-12-09T18:48:20,367 INFO [M:0;a437f8b9ba7d:41833 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c18d9f539c804760b30d89f4c53e3a33 2024-12-09T18:48:20,387 DEBUG [M:0;a437f8b9ba7d:41833 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/83e0ccf920974b82a3fffb9fb4674d32 is 747, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1733770099014/Put/seqid=0 2024-12-09T18:48:20,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741845_1021 (size=6438) 2024-12-09T18:48:20,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741845_1021 (size=6438) 2024-12-09T18:48:20,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741845_1021 (size=6438) 2024-12-09T18:48:20,395 INFO [M:0;a437f8b9ba7d:41833 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.13 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/83e0ccf920974b82a3fffb9fb4674d32 2024-12-09T18:48:20,414 DEBUG [M:0;a437f8b9ba7d:41833 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/55278812eb9c4433bf326f89ee7fcf0f is 69, key is a437f8b9ba7d,35043,1733770097400/rs:state/1733770097932/Put/seqid=0 2024-12-09T18:48:20,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741846_1022 (size=5294) 2024-12-09T18:48:20,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741846_1022 (size=5294) 2024-12-09T18:48:20,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:42521 is added to blk_1073741846_1022 (size=5294) 2024-12-09T18:48:20,421 INFO [M:0;a437f8b9ba7d:41833 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/55278812eb9c4433bf326f89ee7fcf0f 2024-12-09T18:48:20,450 DEBUG [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c18d9f539c804760b30d89f4c53e3a33 as hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c18d9f539c804760b30d89f4c53e3a33 2024-12-09T18:48:20,456 INFO [M:0;a437f8b9ba7d:41833 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c18d9f539c804760b30d89f4c53e3a33, entries=8, sequenceid=72, filesize=5.5 K 2024-12-09T18:48:20,457 DEBUG [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/83e0ccf920974b82a3fffb9fb4674d32 as hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/83e0ccf920974b82a3fffb9fb4674d32 2024-12-09T18:48:20,462 INFO [M:0;a437f8b9ba7d:41833 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/83e0ccf920974b82a3fffb9fb4674d32, entries=8, sequenceid=72, filesize=6.3 K 2024-12-09T18:48:20,463 DEBUG [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/55278812eb9c4433bf326f89ee7fcf0f as hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/55278812eb9c4433bf326f89ee7fcf0f 2024-12-09T18:48:20,469 INFO [M:0;a437f8b9ba7d:41833 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:40897/user/jenkins/test-data/1e5b330c-efb8-4f38-8ded-8b9c30509878/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/55278812eb9c4433bf326f89ee7fcf0f, entries=3, sequenceid=72, filesize=5.2 K 2024-12-09T18:48:20,471 INFO [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=72, compaction requested=false 2024-12-09T18:48:20,472 INFO [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
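[Editor's note] The "Committing .../.tmp/... as .../info|proc|rs/..." records above show flushed store files being written under a temporary directory and then moved into the store directory, i.e. the usual write-to-temp-then-rename commit pattern on HDFS. A minimal sketch of that pattern with the plain Hadoop FileSystem API (paths are invented for the example; this is not the HRegionFileSystem code):

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the commit-via-rename pattern: write the new file under .tmp,
// close it, then rename it into its final location so readers only ever
// see a complete file.
public class TmpThenRenameCommit {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();  // picks up fs.defaultFS (e.g. hdfs://...)
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/demo/store/.tmp/part-0001");  // hypothetical paths
    Path dst = new Path("/demo/store/part-0001");

    // 1. Write the full contents under the temporary location.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("example payload".getBytes(StandardCharsets.UTF_8));
    }

    // 2. Commit by renaming; on HDFS this is a metadata-only operation.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("Failed to commit " + tmp + " to " + dst);
    }
    System.out.println("Committed " + dst);
  }
}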
2024-12-09T18:48:20,472 DEBUG [M:0;a437f8b9ba7d:41833 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1733770100343Disabling compacts and flushes for region at 1733770100343Disabling writes for close at 1733770100343Obtaining lock to block concurrent updates at 1733770100343Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1733770100343Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27459, getHeapSize=34864, getOffHeapSize=0, getCellsCount=85 at 1733770100344 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1733770100345 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1733770100345Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1733770100359 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1733770100359Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1733770100372 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1733770100386 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1733770100386Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1733770100401 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1733770100413 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1733770100414 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@19ed0fa0: reopening flushed file at 1733770100449 (+35 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27717bfa: reopening flushed file at 1733770100456 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@eec906c: reopening flushed file at 1733770100462 (+6 ms)Finished flush of dataSize ~26.82 KB/27459, heapSize ~33.81 KB/34624, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 128ms, sequenceid=72, compaction requested=false at 1733770100471 (+9 ms)Writing region close event to WAL at 1733770100472 (+1 ms)Closed at 1733770100472 2024-12-09T18:48:20,474 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:20,474 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:20,474 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:20,474 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:20,474 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-12-09T18:48:20,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44265 is added to blk_1073741830_1006 (size=32662) 2024-12-09T18:48:20,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42521 is added to blk_1073741830_1006 (size=32662) 2024-12-09T18:48:20,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45639 is added to blk_1073741830_1006 (size=32662) 2024-12-09T18:48:20,478 INFO [M:0;a437f8b9ba7d:41833 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-12-09T18:48:20,478 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-12-09T18:48:20,478 INFO [M:0;a437f8b9ba7d:41833 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41833 2024-12-09T18:48:20,478 INFO [M:0;a437f8b9ba7d:41833 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-12-09T18:48:20,583 INFO [M:0;a437f8b9ba7d:41833 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-12-09T18:48:20,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:20,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41833-0x1000c1f4ecb0000, quorum=127.0.0.1:60447, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-09T18:48:20,590 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@44672b71{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:20,590 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@36a9ca95{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T18:48:20,591 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T18:48:20,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@438bc7ce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T18:48:20,591 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4c77de1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/hadoop.log.dir/,STOPPED} 2024-12-09T18:48:20,593 WARN [BP-1051587299-172.17.0.2-1733770095587 heartbeating to localhost/127.0.0.1:40897 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T18:48:20,593 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T18:48:20,593 WARN [BP-1051587299-172.17.0.2-1733770095587 heartbeating to localhost/127.0.0.1:40897 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1051587299-172.17.0.2-1733770095587 (Datanode Uuid b44ebac1-feff-4fc1-bec6-08cb852937b0) service to localhost/127.0.0.1:40897 2024-12-09T18:48:20,593 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T18:48:20,594 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data5/current/BP-1051587299-172.17.0.2-1733770095587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:20,595 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data6/current/BP-1051587299-172.17.0.2-1733770095587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:20,595 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T18:48:20,598 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@30bdc6f8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:20,599 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5547eae9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T18:48:20,599 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T18:48:20,600 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1b0441b5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T18:48:20,600 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4e5afbc4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/hadoop.log.dir/,STOPPED} 2024-12-09T18:48:20,602 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T18:48:20,602 WARN [BP-1051587299-172.17.0.2-1733770095587 heartbeating to localhost/127.0.0.1:40897 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T18:48:20,602 WARN [BP-1051587299-172.17.0.2-1733770095587 heartbeating to localhost/127.0.0.1:40897 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1051587299-172.17.0.2-1733770095587 (Datanode Uuid 9e3a0b98-2f61-48b7-82b0-0ea68294af69) service to localhost/127.0.0.1:40897 2024-12-09T18:48:20,602 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T18:48:20,602 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data3/current/BP-1051587299-172.17.0.2-1733770095587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:20,603 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data4/current/BP-1051587299-172.17.0.2-1733770095587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:20,603 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T18:48:20,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7eeef71e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-09T18:48:20,605 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@70feba44{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T18:48:20,605 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T18:48:20,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20a0e688{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T18:48:20,605 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61d23bc{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/hadoop.log.dir/,STOPPED} 2024-12-09T18:48:20,606 WARN [BP-1051587299-172.17.0.2-1733770095587 heartbeating to localhost/127.0.0.1:40897 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-12-09T18:48:20,606 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-12-09T18:48:20,606 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-12-09T18:48:20,606 WARN [BP-1051587299-172.17.0.2-1733770095587 heartbeating to localhost/127.0.0.1:40897 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1051587299-172.17.0.2-1733770095587 (Datanode Uuid fa37c143-69b9-426e-9f3a-909bc46c5d1a) service to localhost/127.0.0.1:40897 2024-12-09T18:48:20,607 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data1/current/BP-1051587299-172.17.0.2-1733770095587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:20,607 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/cluster_6633b0c3-5da6-2b2d-dcb7-2ed9f1faead3/data/data2/current/BP-1051587299-172.17.0.2-1733770095587 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-12-09T18:48:20,607 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-12-09T18:48:20,612 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3c97821d{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-09T18:48:20,613 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@544c0dd2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-09T18:48:20,613 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-09T18:48:20,613 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62802e4c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-12-09T18:48:20,613 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16eaa68d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/aa247262-e897-cca0-5e20-66d0ddc1154c/hadoop.log.dir/,STOPPED} 2024-12-09T18:48:20,620 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-12-09T18:48:20,641 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-12-09T18:48:20,648 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=155 (was 92) - Thread LEAK? -, OpenFileDescriptor=518 (was 437) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=334 (was 329) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=2607 (was 2803)
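[Editor's note] The closing ResourceChecker record compares thread, open-file-descriptor and load figures captured before the test with the values after the minicluster is down, flagging suspicious growth as a possible leak. A small, self-contained sketch of that before/after idea using standard JMX beans (only an illustration of the technique, not HBase's ResourceChecker; the leak threshold is arbitrary):

import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.lang.management.ThreadMXBean;

// Sketch of a before/after resource check: snapshot counters, run the work,
// snapshot again, and report anything that grew past a (made-up) threshold.
public class ResourceSnapshotCheck {

  record Snapshot(int threads, long openFds, double load) { }

  static Snapshot take() {
    ThreadMXBean threads = ManagementFactory.getThreadMXBean();
    OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
    long fds = -1;
    // Open-FD count is only exposed on Unix-like JVMs via the com.sun extension.
    if (os instanceof com.sun.management.UnixOperatingSystemMXBean unix) {
      fds = unix.getOpenFileDescriptorCount();
    }
    return new Snapshot(threads.getThreadCount(), fds, os.getSystemLoadAverage());
  }

  public static void main(String[] args) throws Exception {
    Snapshot before = take();

    // ... the code under test would run here ...
    Thread.sleep(100);

    Snapshot after = take();
    System.out.printf("Thread=%d (was %d)%s%n", after.threads(), before.threads(),
        after.threads() - before.threads() > 50 ? " - Thread LEAK?" : "");
    System.out.printf("OpenFileDescriptor=%d (was %d)%n", after.openFds(), before.openFds());
    System.out.printf("SystemLoadAverage=%.2f (was %.2f)%n", after.load(), before.load());
  }
}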