2024-11-11 16:31:43,749 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-11 16:31:43,777 main DEBUG Took 0.024700 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging
2024-11-11 16:31:43,777 main DEBUG PluginManager 'Core' found 129 plugins
2024-11-11 16:31:43,784 main DEBUG PluginManager 'Level' found 0 plugins
2024-11-11 16:31:43,786 main DEBUG PluginManager 'Lookup' found 16 plugins
2024-11-11 16:31:43,788 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,805 main DEBUG PluginManager 'TypeConverter' found 26 plugins
2024-11-11 16:31:43,829 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,831 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,833 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,833 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,834 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,834 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,835 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,836 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,837 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,837 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,838 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,839 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,839 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,840 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,841 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,841 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,842 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,842 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,843 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,843 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,844 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,844 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,845 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,846 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig].
2024-11-11 16:31:43,846 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,847 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger].
2024-11-11 16:31:43,849 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null)
2024-11-11 16:31:43,850 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin].
2024-11-11 16:31:43,852 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root})
2024-11-11 16:31:43,853 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout].
2024-11-11 16:31:43,854 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null")
2024-11-11 16:31:43,855 main DEBUG PluginManager 'Converter' found 47 plugins
2024-11-11 16:31:43,865 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender].
2024-11-11 16:31:43,868 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={})
2024-11-11 16:31:43,870 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR
2024-11-11 16:31:43,871 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin].
2024-11-11 16:31:43,871 main DEBUG createAppenders(={Console})
2024-11-11 16:31:43,872 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba initialized
2024-11-11 16:31:43,873 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba
2024-11-11 16:31:43,873 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@24569dba OK.
2024-11-11 16:31:43,874 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1
2024-11-11 16:31:43,874 main DEBUG OutputStream closed
2024-11-11 16:31:43,875 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true
2024-11-11 16:31:43,875 main DEBUG Appender DefaultConsole-1 stopped with status true
2024-11-11 16:31:43,875 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@49c7b90e OK
2024-11-11 16:31:43,964 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6
2024-11-11 16:31:43,967 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger
2024-11-11 16:31:43,968 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector
2024-11-11 16:31:43,970 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=
2024-11-11 16:31:43,971 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory
2024-11-11 16:31:43,971 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
2024-11-11 16:31:43,972 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper
2024-11-11 16:31:43,972 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j
2024-11-11 16:31:43,973 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl
2024-11-11 16:31:43,973 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans
2024-11-11 16:31:43,973 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase
2024-11-11 16:31:43,974 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop
2024-11-11 16:31:43,974 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers
2024-11-11 16:31:43,975 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices
2024-11-11 16:31:43,975 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig
2024-11-11 16:31:43,975 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel
2024-11-11 16:31:43,976 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore
2024-11-11 16:31:43,976 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console
2024-11-11 16:31:43,979 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
2024-11-11 16:31:43,979 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@35432107) with optional ClassLoader: null
2024-11-11 16:31:43,980 main DEBUG Shutdown hook enabled. Registering a new one.
2024-11-11 16:31:43,981 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@35432107] started OK.
2024-11-11T16:31:44,000 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins
2024-11-11 16:31:44,005 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED)
2024-11-11 16:31:44,005 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps.
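The status lines above record Log4j 2 reading log4j2.properties from the hbase-logging test jar and building one LoggerConfig per named logger, a PatternLayout, and the HBase-specific HBaseTestAppender named "Console" writing to SYSTEM_ERR, with the root logger resolved to "INFO,Console". For orientation only, a properties configuration expressing the levels and appender visible in these entries would look roughly like the sketch below; the property IDs and exact key layout are illustrative and not copied from the real file.

  status = debug
  appender.console.type = HBaseTestAppender
  appender.console.name = Console
  appender.console.target = SYSTEM_ERR
  appender.console.maxSize = 1G
  appender.console.layout.type = PatternLayout
  appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n
  rootLogger = INFO,Console
  logger.zookeeper.name = org.apache.zookeeper
  logger.zookeeper.level = ERROR
  logger.hbase.name = org.apache.hadoop.hbase
  logger.hbase.level = DEBUG
  # ...one name/level pair per LoggerConfig$Builder entry shown above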
2024-11-11T16:31:44,388 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e
2024-11-11T16:31:44,424 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b, deleteOnExit=true
2024-11-11T16:31:44,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/test.cache.data in system properties and HBase conf
2024-11-11T16:31:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.tmp.dir in system properties and HBase conf
2024-11-11T16:31:44,427 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.log.dir in system properties and HBase conf
2024-11-11T16:31:44,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-11T16:31:44,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-11T16:31:44,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-11T16:31:44,560 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2024-11-11T16:31:44,701 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-11T16:31:44,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-11T16:31:44,707 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-11T16:31:44,708 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-11T16:31:44,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-11T16:31:44,709 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-11T16:31:44,710 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-11T16:31:44,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-11T16:31:44,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-11T16:31:44,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-11T16:31:44,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/nfs.dump.dir in system properties and HBase conf
2024-11-11T16:31:44,713 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/java.io.tmpdir in system properties and HBase conf
2024-11-11T16:31:44,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-11T16:31:44,714 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-11T16:31:44,715 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-11T16:31:45,650 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
2024-11-11T16:31:45,751 INFO [Time-limited test {}] log.Log(170): Logging initialized @2935ms to org.eclipse.jetty.util.log.Slf4jLog
2024-11-11T16:31:45,845 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T16:31:45,925 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T16:31:45,975 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T16:31:45,975 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T16:31:45,977 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-11T16:31:45,994 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T16:31:45,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.log.dir/,AVAILABLE}
2024-11-11T16:31:45,997 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T16:31:46,205 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@76e4c45c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/java.io.tmpdir/jetty-localhost-41889-hadoop-hdfs-3_4_1-tests_jar-_-any-7826005359311332879/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T16:31:46,212 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:41889}
2024-11-11T16:31:46,212 INFO [Time-limited test {}] server.Server(415): Started @3398ms
2024-11-11T16:31:46,638 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T16:31:46,646 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T16:31:46,649 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T16:31:46,649 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T16:31:46,649 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-11T16:31:46,650 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@32fec40a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.log.dir/,AVAILABLE}
2024-11-11T16:31:46,651 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@17f1c7fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T16:31:46,802 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e705dc8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/java.io.tmpdir/jetty-localhost-41787-hadoop-hdfs-3_4_1-tests_jar-_-any-6185622005950913693/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T16:31:46,803 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2ad1569e{HTTP/1.1, (http/1.1)}{localhost:41787}
2024-11-11T16:31:46,803 INFO [Time-limited test {}] server.Server(415): Started @3989ms
2024-11-11T16:31:46,861 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-11T16:31:46,991 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T16:31:47,001 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T16:31:47,004 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T16:31:47,004 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T16:31:47,004 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms
2024-11-11T16:31:47,008 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a91ec1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.log.dir/,AVAILABLE}
2024-11-11T16:31:47,009 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c2c5be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T16:31:47,131 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@26b068f7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/java.io.tmpdir/jetty-localhost-36287-hadoop-hdfs-3_4_1-tests_jar-_-any-4434556082461874996/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T16:31:47,132 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5739b847{HTTP/1.1, (http/1.1)}{localhost:36287}
2024-11-11T16:31:47,132 INFO [Time-limited test {}] server.Server(415): Started @4318ms
2024-11-11T16:31:47,135 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
2024-11-11T16:31:47,187 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T16:31:47,194 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T16:31:47,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T16:31:47,197 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T16:31:47,198 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-11T16:31:47,198 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7e1f796{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.log.dir/,AVAILABLE}
2024-11-11T16:31:47,199 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1023f385{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T16:31:47,327 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f750918{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/java.io.tmpdir/jetty-localhost-35919-hadoop-hdfs-3_4_1-tests_jar-_-any-15780200158041415906/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T16:31:47,328 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@86bf2a7{HTTP/1.1, (http/1.1)}{localhost:35919}
2024-11-11T16:31:47,328 INFO [Time-limited test {}] server.Server(415): Started @4513ms
2024-11-11T16:31:47,331 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering.
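The "hdfs" web context and the three "datanode" web contexts above are the HTTP side of an in-process HDFS mini cluster being brought up for the test from hadoop-hdfs-3.4.1-tests.jar. The harness class is not named in this log; as a rough sketch only (assuming the standard MiniDFSCluster test utility rather than whatever HBaseTestingUtil wraps around it), a one-namenode, three-datanode cluster of this shape is typically started like this:

  // Sketch: in-process HDFS cluster with 1 namenode and 3 datanodes,
  // matching the topology visible in the jetty/datanode lines above.
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.MiniDFSCluster;

  public class MiniHdfsSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
          .numDataNodes(3)      // three datanodes, as in the log
          .build();
      cluster.waitActive();     // block until datanodes have registered
      DistributedFileSystem fs = cluster.getFileSystem();
      System.out.println("Mini HDFS up at " + fs.getUri());
      cluster.shutdown();
    }
  }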
2024-11-11T16:31:47,349 WARN [Thread-111 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data2/current/BP-1543572805-172.17.0.2-1731342705390/current, will proceed with Du for space computation calculation,
2024-11-11T16:31:47,349 WARN [Thread-110 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data4/current/BP-1543572805-172.17.0.2-1731342705390/current, will proceed with Du for space computation calculation,
2024-11-11T16:31:47,350 WARN [Thread-109 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data1/current/BP-1543572805-172.17.0.2-1731342705390/current, will proceed with Du for space computation calculation,
2024-11-11T16:31:47,350 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data3/current/BP-1543572805-172.17.0.2-1731342705390/current, will proceed with Du for space computation calculation,
2024-11-11T16:31:47,401 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-11T16:31:47,402 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-11T16:31:47,473 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data5/current/BP-1543572805-172.17.0.2-1731342705390/current, will proceed with Du for space computation calculation,
2024-11-11T16:31:47,488 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data6/current/BP-1543572805-172.17.0.2-1731342705390/current, will proceed with Du for space computation calculation,
2024-11-11T16:31:47,504 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x158a7c7bb5967c06 with lease ID 0x94088249be20c526: Processing first storage report for DS-90e944c2-a98c-4f29-a1ca-8ab940d0724e from datanode DatanodeRegistration(127.0.0.1:45373, datanodeUuid=82b774a6-c434-4b51-a970-9849cab83a86, infoPort=40731, infoSecurePort=0, ipcPort=32785, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390)
2024-11-11T16:31:47,506 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x158a7c7bb5967c06 with lease ID 0x94088249be20c526: from storage DS-90e944c2-a98c-4f29-a1ca-8ab940d0724e node DatanodeRegistration(127.0.0.1:45373, datanodeUuid=82b774a6-c434-4b51-a970-9849cab83a86, infoPort=40731, infoSecurePort=0, ipcPort=32785, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0
2024-11-11T16:31:47,506 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcfe258914f4ba6a0 with lease ID 0x94088249be20c525: Processing first storage report for DS-667085a3-27ed-4c40-b34e-ca8b04a145ec from datanode DatanodeRegistration(127.0.0.1:44767, datanodeUuid=e66bc071-0f56-4cc2-9193-d9231e8ec122, infoPort=33429, infoSecurePort=0, ipcPort=45215, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390)
2024-11-11T16:31:47,506 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcfe258914f4ba6a0 with lease ID 0x94088249be20c525: from storage DS-667085a3-27ed-4c40-b34e-ca8b04a145ec node DatanodeRegistration(127.0.0.1:44767, datanodeUuid=e66bc071-0f56-4cc2-9193-d9231e8ec122, infoPort=33429, infoSecurePort=0, ipcPort=45215, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-11T16:31:47,507 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x158a7c7bb5967c06 with lease ID 0x94088249be20c526: Processing first storage report for DS-412c7ad6-0f33-49cc-bbb4-b2c726bb7d44 from datanode DatanodeRegistration(127.0.0.1:45373, datanodeUuid=82b774a6-c434-4b51-a970-9849cab83a86, infoPort=40731, infoSecurePort=0, ipcPort=32785, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390)
2024-11-11T16:31:47,507 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x158a7c7bb5967c06 with lease ID 0x94088249be20c526: from storage DS-412c7ad6-0f33-49cc-bbb4-b2c726bb7d44 node DatanodeRegistration(127.0.0.1:45373, datanodeUuid=82b774a6-c434-4b51-a970-9849cab83a86, infoPort=40731, infoSecurePort=0, ipcPort=32785, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0
2024-11-11T16:31:47,507 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xcfe258914f4ba6a0 with lease ID 0x94088249be20c525: Processing first storage report for DS-dc6ee8c7-f6d3-47a2-8aa2-2e2bfe3e57f6 from datanode DatanodeRegistration(127.0.0.1:44767, datanodeUuid=e66bc071-0f56-4cc2-9193-d9231e8ec122, infoPort=33429, infoSecurePort=0, ipcPort=45215, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390)
2024-11-11T16:31:47,508 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xcfe258914f4ba6a0 with lease ID 0x94088249be20c525: from storage DS-dc6ee8c7-f6d3-47a2-8aa2-2e2bfe3e57f6 node DatanodeRegistration(127.0.0.1:44767, datanodeUuid=e66bc071-0f56-4cc2-9193-d9231e8ec122, infoPort=33429, infoSecurePort=0, ipcPort=45215, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-11T16:31:47,522 WARN [Thread-115 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1
2024-11-11T16:31:47,528 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x12aa2f35c37c6f83 with lease ID 0x94088249be20c527: Processing first storage report for DS-53f905b2-b558-49ef-8b6d-b7ed5a6fd765 from datanode DatanodeRegistration(127.0.0.1:41631, datanodeUuid=5a0763cd-2f13-4b3d-923e-cfb2ea4c63ea, infoPort=45699, infoSecurePort=0, ipcPort=38279, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390)
2024-11-11T16:31:47,529 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x12aa2f35c37c6f83 with lease ID 0x94088249be20c527: from storage DS-53f905b2-b558-49ef-8b6d-b7ed5a6fd765 node DatanodeRegistration(127.0.0.1:41631, datanodeUuid=5a0763cd-2f13-4b3d-923e-cfb2ea4c63ea, infoPort=45699, infoSecurePort=0, ipcPort=38279, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-11T16:31:47,529 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x12aa2f35c37c6f83 with lease ID 0x94088249be20c527: Processing first storage report for DS-fa528ec2-c2d6-4df6-a4f3-0b8c050f7bd4 from datanode DatanodeRegistration(127.0.0.1:41631, datanodeUuid=5a0763cd-2f13-4b3d-923e-cfb2ea4c63ea, infoPort=45699, infoSecurePort=0, ipcPort=38279, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390)
2024-11-11T16:31:47,529 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x12aa2f35c37c6f83 with lease ID 0x94088249be20c527: from storage DS-fa528ec2-c2d6-4df6-a4f3-0b8c050f7bd4 node DatanodeRegistration(127.0.0.1:41631, datanodeUuid=5a0763cd-2f13-4b3d-923e-cfb2ea4c63ea, infoPort=45699, infoSecurePort=0, ipcPort=38279, storageInfo=lv=-57;cid=testClusterID;nsid=502407922;c=1731342705390), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0
2024-11-11T16:31:47,787 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e
2024-11-11T16:31:47,871 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... using builtin-java codec where applicable
2024-11-11T16:31:47,938 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=158, OpenFileDescriptor=391, MaxFileDescriptor=1048576, SystemLoadAverage=507, ProcessCount=11, AvailableMemoryMB=3356
2024-11-11T16:31:47,940 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-11T16:31:47,951 INFO [Time-limited test {}] hbase.HBaseTestingUtil(821): NOT STARTING DFS
2024-11-11T16:31:48,041 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/zookeeper_0, clientPort=57850, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0
2024-11-11T16:31:48,054 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57850
2024-11-11T16:31:48,070 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:31:48,074 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:31:48,201 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T16:31:48,202 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T16:31:48,252 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:54700 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:41631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54700 dst: /127.0.0.1:41631
java.io.IOException: Premature EOF from inputStream
  at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
  at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
  at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T16:31:48,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775792_1002 (size=7)
2024-11-11T16:31:48,672 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
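The two "Cannot allocate parity block" warnings, the DataXceiver error, and the "Block group <1> failed to write 2 blocks" line are one symptom: the directory being written uses the RS-3-2-1024k erasure coding policy, which wants block locations on five datanodes (three data plus two parity), while only three datanodes are running here, so the two parity blocks (indexes 3 and 4) cannot be placed. The log's own suggestion is to run 'hdfs ec -verifyClusterSetup'. As a sketch only of how such a policy is enabled and applied programmatically (assuming the Hadoop 3 DistributedFileSystem API; the exact calls made by TestHBaseWalOnEC are not shown in this excerpt):

  // Sketch: enable RS-3-2-1024k and apply it to a directory. With fewer than
  // five datanodes, writes under this path hit the same parity-allocation
  // warnings seen in the log above.
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;

  public class EcPolicySketch {
    static void applyEcPolicy(DistributedFileSystem dfs, Path dir) throws Exception {
      dfs.enableErasureCodingPolicy("RS-3-2-1024k");     // enable cluster-wide
      dfs.mkdirs(dir);
      dfs.setErasureCodingPolicy(dir, "RS-3-2-1024k");   // new files under dir are striped 3+2
      System.out.println("EC policy on " + dir + ": " + dfs.getErasureCodingPolicy(dir));
    }
  }

The CLI equivalents are 'hdfs ec -enablePolicy -policy RS-3-2-1024k', 'hdfs ec -setPolicy -path <dir> -policy RS-3-2-1024k', and the 'hdfs ec -verifyClusterSetup' check that the warning itself recommends.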
2024-11-11T16:31:48,681 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571 with version=8
2024-11-11T16:31:48,682 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/hbase-staging
2024-11-11T16:31:48,777 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16
2024-11-11T16:31:49,043 INFO [Time-limited test {}] client.ConnectionUtils(128): master/16b413a53992:0 server-side Connection retries=45
2024-11-11T16:31:49,054 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,055 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,061 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-11T16:31:49,061 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,062 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-11T16:31:49,241 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService
2024-11-11T16:31:49,316 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl
2024-11-11T16:31:49,326 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout
2024-11-11T16:31:49,330 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-11T16:31:49,359 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 25416 (auto-detected)
2024-11-11T16:31:49,360 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected)
2024-11-11T16:31:49,380 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45041
2024-11-11T16:31:49,404 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45041 connecting to ZooKeeper ensemble=127.0.0.1:57850
2024-11-11T16:31:49,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:450410x0, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-11T16:31:49,441 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45041-0x1002faf37cd0000 connected
2024-11-11T16:31:49,473 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:31:49,476 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:31:49,487 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-11T16:31:49,493 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571, hbase.cluster.distributed=false
2024-11-11T16:31:49,524 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-11T16:31:49,531 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45041
2024-11-11T16:31:49,532 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45041
2024-11-11T16:31:49,533 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45041
2024-11-11T16:31:49,534 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45041
2024-11-11T16:31:49,534 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45041
2024-11-11T16:31:49,661 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b413a53992:0 server-side Connection retries=45
2024-11-11T16:31:49,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,663 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-11T16:31:49,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,663 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-11T16:31:49,666 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-11T16:31:49,668 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-11T16:31:49,669 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34889
2024-11-11T16:31:49,672 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34889 connecting to ZooKeeper ensemble=127.0.0.1:57850
2024-11-11T16:31:49,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:31:49,678 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:31:49,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:348890x0, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-11T16:31:49,686 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:348890x0, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-11T16:31:49,689 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34889-0x1002faf37cd0001 connected
2024-11-11T16:31:49,691 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-11T16:31:49,701 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-11T16:31:49,704 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-11T16:31:49,710 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-11T16:31:49,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34889
2024-11-11T16:31:49,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34889
2024-11-11T16:31:49,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34889
2024-11-11T16:31:49,715 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34889
2024-11-11T16:31:49,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34889
2024-11-11T16:31:49,733 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b413a53992:0 server-side Connection retries=45
2024-11-11T16:31:49,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,733 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,734 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-11T16:31:49,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,734 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-11T16:31:49,734 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-11T16:31:49,735 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-11T16:31:49,736 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42593
2024-11-11T16:31:49,738 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42593 connecting to ZooKeeper ensemble=127.0.0.1:57850
2024-11-11T16:31:49,739 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:31:49,742 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:31:49,750 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:425930x0, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-11T16:31:49,751 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:425930x0, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-11T16:31:49,752 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-11T16:31:49,753 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42593-0x1002faf37cd0002 connected
2024-11-11T16:31:49,757 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-11T16:31:49,758 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-11T16:31:49,760 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-11T16:31:49,764 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42593
2024-11-11T16:31:49,764 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42593
2024-11-11T16:31:49,768 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42593
2024-11-11T16:31:49,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42593
2024-11-11T16:31:49,769 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42593
2024-11-11T16:31:49,787 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b413a53992:0 server-side Connection retries=45
2024-11-11T16:31:49,787 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,787 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,788 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0
2024-11-11T16:31:49,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3
2024-11-11T16:31:49,788 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1
2024-11-11T16:31:49,788 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService
2024-11-11T16:31:49,789 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation
2024-11-11T16:31:49,790 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37705
2024-11-11T16:31:49,792 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37705 connecting to ZooKeeper ensemble=127.0.0.1:57850
2024-11-11T16:31:49,794 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:31:49,796 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-11T16:31:49,801 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377050x0, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null
2024-11-11T16:31:49,802 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37705-0x1002faf37cd0003 connected
2024-11-11T16:31:49,803 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-11T16:31:49,803 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB
2024-11-11T16:31:49,808 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5
2024-11-11T16:31:49,810 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-11T16:31:49,812 DEBUG [Time-limited test {}]
zookeeper.ZKUtil(113): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T16:31:49,815 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37705 2024-11-11T16:31:49,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37705 2024-11-11T16:31:49,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37705 2024-11-11T16:31:49,817 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37705 2024-11-11T16:31:49,818 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37705 2024-11-11T16:31:49,834 DEBUG [M:0;16b413a53992:45041 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;16b413a53992:45041 2024-11-11T16:31:49,835 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/16b413a53992,45041,1731342708831 2024-11-11T16:31:49,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:49,842 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:49,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:49,843 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:49,847 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/16b413a53992,45041,1731342708831 2024-11-11T16:31:49,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T16:31:49,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T16:31:49,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:49,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, 
quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T16:31:49,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:49,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:49,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:49,869 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T16:31:49,870 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/16b413a53992,45041,1731342708831 from backup master directory 2024-11-11T16:31:49,873 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:49,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:49,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/16b413a53992,45041,1731342708831 2024-11-11T16:31:49,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:49,874 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:49,875 WARN [master/16b413a53992:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
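The NodeCreated/NodeChildrenChanged events above reflect ZooKeeper's one-shot watch model: each ZKUtil call such as "Set watcher on znode that does not yet exist, /hbase/master" registers a watch that fires once and must be re-armed after every event. A minimal sketch of that exists-and-watch pattern using the plain ZooKeeper client rather than HBase's ZKUtil internals (the ensemble address is taken from the log; class and variable names are illustrative):

    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    // Sketch of the watch-and-rearm pattern behind the ZKWatcher lines above;
    // plain ZooKeeper client, not HBase code (illustrative only).
    public class MasterZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Ensemble taken from the log (ensemble=127.0.0.1:57850).
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57850", 30_000, event -> { });

        Watcher watcher = new Watcher() {
          @Override
          public void process(WatchedEvent event) {
            // Watches are one-shot: after an event fires, re-register to keep watching,
            // which is why the log repeats "Set watcher on ..." for the same znodes.
            System.out.println("Event " + event.getType() + " on " + event.getPath());
            try {
              zk.exists("/hbase/master", this); // re-arm the watch
            } catch (KeeperException | InterruptedException e) {
              e.printStackTrace();
            }
          }
        };

        // Setting a watch on a znode that may not exist yet: exists() returns null,
        // but the watch still fires on NodeCreated ("Set watcher on znode that does
        // not yet exist, /hbase/master").
        zk.exists("/hbase/master", watcher);
      }
    }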
2024-11-11T16:31:49,876 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=16b413a53992,45041,1731342708831 2024-11-11T16:31:49,879 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-11T16:31:49,881 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-11T16:31:49,953 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/hbase.id] with ID: 77dc20a1-7a9e-4e31-96f8-0c6460b561fe 2024-11-11T16:31:49,953 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/.tmp/hbase.id 2024-11-11T16:31:49,960 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:49,960 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:49,966 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:54740 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:41631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54740 dst: /127.0.0.1:41631 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T16:31:49,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-11T16:31:49,974 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T16:31:49,975 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/.tmp/hbase.id]:[hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/hbase.id] 2024-11-11T16:31:50,020 INFO [master/16b413a53992:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:50,024 INFO [master/16b413a53992:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T16:31:50,045 INFO [master/16b413a53992:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 19ms. 2024-11-11T16:31:50,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:50,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:50,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:50,049 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:50,063 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:50,063 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:50,067 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:59696 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:44767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59696 dst: /127.0.0.1:44767 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:50,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-11T16:31:50,074 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T16:31:50,096 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T16:31:50,099 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T16:31:50,109 INFO [master/16b413a53992:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:31:50,139 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, 
policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:50,140 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:50,144 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:54758 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:41631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54758 dst: /127.0.0.1:41631 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:50,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-11T16:31:50,151 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
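The recurring triplet above, "Cannot allocate parity block(index=3/4, policy=RS-3-2-1024k)" warnings, DataXceiver "Premature EOF" errors, and "Block group <1> failed to write 2 blocks", follows from placement arithmetic: an RS-3-2 block group needs 3 data + 2 parity = 5 distinct datanodes, while only three datanodes (127.0.0.1:41631, :44767, :45373) appear in this mini cluster, so the two parity streamers can never be placed and are abandoned, which the receiving datanodes report as a premature EOF. A minimal sketch, assuming a client against hdfs://localhost:37187, of comparing the directory's policy width against the live datanode count with the DistributedFileSystem API (the path is the test-data directory from the log; names are otherwise illustrative). It is the same check that 'hdfs ec -verifyClusterSetup', mentioned in the warning, performs:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

    // Sketch: check whether the erasure coding policy on the test-data directory
    // is wider than the number of live datanodes.
    public class EcPolicyCheckSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37187"), conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          Path dir = new Path("/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571");

          ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
          if (policy == null) {
            System.out.println("No EC policy set; directory uses plain replication.");
            return;
          }
          int width = policy.getNumDataUnits() + policy.getNumParityUnits(); // 3 + 2 = 5 for RS-3-2-1024k
          int liveNodes = dfs.getDataNodeStats().length;                     // 3 in this mini cluster
          System.out.printf("policy=%s needs %d datanodes, cluster has %d%n",
              policy.getName(), width, liveNodes);
          // width > liveNodes is exactly the condition behind the parity-allocation warnings above.
        }
      }
    }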
2024-11-11T16:31:50,170 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store 2024-11-11T16:31:50,194 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:50,194 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:50,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:54778 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:41631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54778 dst: /127.0.0.1:41631 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:50,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-11T16:31:50,206 WARN [master/16b413a53992:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T16:31:50,210 INFO [master/16b413a53992:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-11T16:31:50,213 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:31:50,214 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T16:31:50,215 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:50,215 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:50,216 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T16:31:50,217 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:50,217 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
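StoreHotnessProtector reports itself disabled above because hbase.region.store.parallel.put.limit is 0. A minimal sketch of enabling it in a test Configuration; the property name comes from the log message itself, while the value 10 is an arbitrary illustrative choice (in a real deployment it would normally go in hbase-site.xml):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Sketch: enable StoreHotnessProtector, which the log reports as disabled
    // because the parallel-put limit is 0.
    public class HotnessProtectorConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.region.store.parallel.put.limit", 10); // > 0 enables the protector
        System.out.println("parallel put limit = "
            + conf.getInt("hbase.region.store.parallel.put.limit", 0));
      }
    }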
2024-11-11T16:31:50,218 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731342710214Disabling compacts and flushes for region at 1731342710214Disabling writes for close at 1731342710216 (+2 ms)Writing region close event to WAL at 1731342710217 (+1 ms)Closed at 1731342710217 2024-11-11T16:31:50,220 WARN [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/.initializing 2024-11-11T16:31:50,220 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/WALs/16b413a53992,45041,1731342708831 2024-11-11T16:31:50,231 INFO [master/16b413a53992:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:31:50,253 INFO [master/16b413a53992:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C45041%2C1731342708831, suffix=, logDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/WALs/16b413a53992,45041,1731342708831, archiveDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/oldWALs, maxLogs=10 2024-11-11T16:31:50,298 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/WALs/16b413a53992,45041,1731342708831/16b413a53992%2C45041%2C1731342708831.1731342710258, exclude list is [], retry=0 2024-11-11T16:31:50,324 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:413) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:472) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$5.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:467) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?]
at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:50,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45373,DS-90e944c2-a98c-4f29-a1ca-8ab940d0724e,DISK] 2024-11-11T16:31:50,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41631,DS-53f905b2-b558-49ef-8b6d-b7ed5a6fd765,DISK] 2024-11-11T16:31:50,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44767,DS-667085a3-27ed-4c40-b34e-ca8b04a145ec,DISK] 2024-11-11T16:31:50,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-11T16:31:50,377 INFO [master/16b413a53992:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/WALs/16b413a53992,45041,1731342708831/16b413a53992%2C45041%2C1731342708831.1731342710258 2024-11-11T16:31:50,378 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33429:33429),(127.0.0.1/127.0.0.1:45699:45699),(127.0.0.1/127.0.0.1:40731:40731)] 2024-11-11T16:31:50,379 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:31:50,380 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:31:50,383 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,385 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,434 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-11T16:31:50,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to 
blk_-9223372036854775789_1002 (size=7) 2024-11-11T16:31:50,482 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T16:31:50,487 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:50,490 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:50,491 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,498 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T16:31:50,498 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:50,499 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:31:50,499 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,502 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T16:31:50,502 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:50,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:31:50,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T16:31:50,507 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:50,508 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:31:50,509 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,513 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,514 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,521 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,522 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,526 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T16:31:50,530 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:50,538 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:31:50,539 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59674917, jitterRate=-0.1107744425535202}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T16:31:50,547 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731342710403Initializing all the Stores at 1731342710405 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342710406 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342710407 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342710407Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342710407Cleaning up temporary data from old regions at 1731342710522 (+115 ms)Region opened successfully at 1731342710547 (+25 ms) 2024-11-11T16:31:50,548 INFO 
[master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T16:31:50,589 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5caee03b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0 2024-11-11T16:31:50,627 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T16:31:50,640 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T16:31:50,640 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T16:31:50,644 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T16:31:50,645 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-11T16:31:50,653 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 6 msec 2024-11-11T16:31:50,653 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T16:31:50,682 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
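The flushSizeLowerBound=33554432 reported by FlushLargeStoresPolicy above is the fallback described in the log: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the master:store descriptor, the region flush size is divided by the number of column families, 134217728 / 4 (info, proc, rs, state) = 33554432 bytes (32 MB). A tiny sketch of that arithmetic (values taken from the log):

    // Sketch of the fallback computed in the FlushLargeStoresPolicy lines above:
    // with no explicit hbase.hregion.percolumnfamilyflush.size.lower.bound, the
    // lower bound is the region flush size divided by the number of column families.
    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long regionFlushSize = 134_217_728L;   // flushSize from the log (128 MB)
        int columnFamilies = 4;                // info, proc, rs, state
        long lowerBound = regionFlushSize / columnFamilies;
        System.out.println(lowerBound);        // 33554432, matching flushSizeLowerBound
      }
    }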
2024-11-11T16:31:50,692 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)
2024-11-11T16:31:50,694 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false
2024-11-11T16:31:50,697 INFO [master/16b413a53992:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1
2024-11-11T16:31:50,698 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error)
2024-11-11T16:31:50,700 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false
2024-11-11T16:31:50,703 INFO [master/16b413a53992:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited
2024-11-11T16:31:50,708 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error)
2024-11-11T16:31:50,709 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false
2024-11-11T16:31:50,711 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error)
2024-11-11T16:31:50,712 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false
2024-11-11T16:31:50,731 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error)
2024-11-11T16:31:50,732 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false
2024-11-11T16:31:50,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-11T16:31:50,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-11T16:31:50,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-11T16:31:50,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running
2024-11-11T16:31:50,736 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,737 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,740 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=16b413a53992,45041,1731342708831, sessionid=0x1002faf37cd0000, setting cluster-up flag (Was=false)
2024-11-11T16:31:50,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,756 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,762 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort
2024-11-11T16:31:50,764 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=16b413a53992,45041,1731342708831
2024-11-11T16:31:50,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,770 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T16:31:50,776 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort
2024-11-11T16:31:50,778 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=16b413a53992,45041,1731342708831
2024-11-11T16:31:50,786 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again
2024-11-11T16:31:50,822 INFO [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(746): ClusterId : 77dc20a1-7a9e-4e31-96f8-0c6460b561fe
2024-11-11T16:31:50,822 INFO [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(746): ClusterId : 77dc20a1-7a9e-4e31-96f8-0c6460b561fe
2024-11-11T16:31:50,823 INFO [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(746): ClusterId : 77dc20a1-7a9e-4e31-96f8-0c6460b561fe
2024-11-11T16:31:50,826 DEBUG [RS:2;16b413a53992:37705 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-11T16:31:50,826 DEBUG [RS:1;16b413a53992:42593 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-11T16:31:50,826 DEBUG [RS:0;16b413a53992:34889 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing
2024-11-11T16:31:50,833 DEBUG [RS:0;16b413a53992:34889 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-11T16:31:50,833 DEBUG [RS:0;16b413a53992:34889 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-11T16:31:50,834 DEBUG [RS:1;16b413a53992:42593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-11T16:31:50,834 DEBUG [RS:1;16b413a53992:42593 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-11T16:31:50,837 DEBUG [RS:0;16b413a53992:34889 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-11T16:31:50,838 DEBUG [RS:1;16b413a53992:42593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-11T16:31:50,838 DEBUG [RS:1;16b413a53992:42593 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7892cf1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0
2024-11-11T16:31:50,838 DEBUG [RS:0;16b413a53992:34889 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73ea0263, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0
2024-11-11T16:31:50,842 DEBUG [RS:2;16b413a53992:37705 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized
2024-11-11T16:31:50,842 DEBUG [RS:2;16b413a53992:37705 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing
2024-11-11T16:31:50,845 DEBUG [RS:2;16b413a53992:37705 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized
2024-11-11T16:31:50,845 DEBUG [RS:2;16b413a53992:37705 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c2d13d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0
2024-11-11T16:31:50,856 DEBUG [RS:1;16b413a53992:42593 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;16b413a53992:42593
2024-11-11T16:31:50,860 INFO [RS:1;16b413a53992:42593 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-11T16:31:50,860 DEBUG [RS:2;16b413a53992:37705 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;16b413a53992:37705
2024-11-11T16:31:50,860 INFO [RS:1;16b413a53992:42593 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-11T16:31:50,860 INFO [RS:2;16b413a53992:37705 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-11T16:31:50,860 DEBUG [RS:0;16b413a53992:34889 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;16b413a53992:34889
2024-11-11T16:31:50,860 DEBUG [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-11T16:31:50,860 INFO [RS:2;16b413a53992:37705 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-11T16:31:50,861 DEBUG [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-11T16:31:50,861 INFO [RS:0;16b413a53992:34889 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled
2024-11-11T16:31:50,861 INFO [RS:0;16b413a53992:34889 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled
2024-11-11T16:31:50,861 DEBUG [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(832): About to register with Master.
2024-11-11T16:31:50,864 INFO [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,45041,1731342708831 with port=37705, startcode=1731342709786
2024-11-11T16:31:50,864 INFO [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,45041,1731342708831 with port=42593, startcode=1731342709733
2024-11-11T16:31:50,864 INFO [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,45041,1731342708831 with port=34889, startcode=1731342709618
2024-11-11T16:31:50,877 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta
2024-11-11T16:31:50,878 DEBUG [RS:1;16b413a53992:42593 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-11T16:31:50,878 DEBUG [RS:2;16b413a53992:37705 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-11T16:31:50,878 DEBUG [RS:0;16b413a53992:34889 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false
2024-11-11T16:31:50,889 INFO [master/16b413a53992:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2
2024-11-11T16:31:50,900 INFO [master/16b413a53992:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc.
2024-11-11T16:31:50,908 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 16b413a53992,45041,1731342708831 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0
2024-11-11T16:31:50,925 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50353, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService
2024-11-11T16:31:50,926 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42191, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService
2024-11-11T16:31:50,926 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57097, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-11-11T16:31:50,926 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5
2024-11-11T16:31:50,926 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5
2024-11-11T16:31:50,926 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5
2024-11-11T16:31:50,927 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5
2024-11-11T16:31:50,927 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/16b413a53992:0, corePoolSize=10, maxPoolSize=10
2024-11-11T16:31:50,927 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:50,927 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/16b413a53992:0, corePoolSize=2, maxPoolSize=2
2024-11-11T16:31:50,927 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:50,933 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45041 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-11T16:31:50,941 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45041 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-11T16:31:50,942 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45041 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT]
2024-11-11T16:31:50,942 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-11T16:31:50,943 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region
2024-11-11T16:31:50,943 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731342740943
2024-11-11T16:31:50,945 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1
2024-11-11T16:31:50,946 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner
2024-11-11T16:31:50,950 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-11T16:31:50,951 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner
2024-11-11T16:31:50,951 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}
2024-11-11T16:31:50,951 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner
2024-11-11T16:31:50,952 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner
2024-11-11T16:31:50,952 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads
2024-11-11T16:31:50,956 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:50,961 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-11T16:31:50,963 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-11T16:31:50,963 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T16:31:50,963 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T16:31:50,963 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-11T16:31:50,966 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-11T16:31:50,967 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-11T16:31:50,967 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:59736 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:44767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59736 dst: /127.0.0.1:44767 java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T16:31:50,969 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.large.0-1731342710968,5,FailOnTimeoutGroup]
2024-11-11T16:31:50,969 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.small.0-1731342710969,5,FailOnTimeoutGroup]
2024-11-11T16:31:50,969 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:50,969 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-11T16:31:50,971 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:50,971 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:50,972 DEBUG [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-11-11T16:31:50,972 DEBUG [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-11-11T16:31:50,972 DEBUG [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(2683): Master is not running yet
2024-11-11T16:31:50,972 WARN [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-11T16:31:50,972 WARN [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-11T16:31:50,972 WARN [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying.
2024-11-11T16:31:50,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to blk_-9223372036854775712_1013 (size=1321)
2024-11-11T16:31:50,975 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-11T16:31:50,976 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321
2024-11-11T16:31:50,977 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571
2024-11-11T16:31:50,982 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T16:31:50,983 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T16:31:50,988 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:42944 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:45373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42944 dst: /127.0.0.1:45373 java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T16:31:50,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_-9223372036854775696_1015 (size=32)
2024-11-11T16:31:50,994 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-11T16:31:50,996 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-11T16:31:50,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740
2024-11-11T16:31:51,001 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info
2024-11-11T16:31:51,001 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-11T16:31:51,002 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-11T16:31:51,003 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740
2024-11-11T16:31:51,005 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns
2024-11-11T16:31:51,005 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-11T16:31:51,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-11T16:31:51,006 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740
2024-11-11T16:31:51,008 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier
2024-11-11T16:31:51,008 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-11T16:31:51,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-11T16:31:51,009 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740
2024-11-11T16:31:51,011 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table
2024-11-11T16:31:51,011 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-11T16:31:51,012 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE
2024-11-11T16:31:51,012 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740
2024-11-11T16:31:51,014 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740
2024-11-11T16:31:51,014 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740
2024-11-11T16:31:51,017 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740
2024-11-11T16:31:51,017 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740
2024-11-11T16:31:51,018 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead.
2024-11-11T16:31:51,020 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740
2024-11-11T16:31:51,025 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1
2024-11-11T16:31:51,026 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70123336, jitterRate=0.04491913318634033}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432}
2024-11-11T16:31:51,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731342710996Initializing all the Stores at 1731342710998 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342710998Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342710999 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342710999Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342710999Cleaning up temporary data from old regions at 1731342711017 (+18 ms)Region opened successfully at 1731342711029 (+12 ms)
2024-11-11T16:31:51,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-11T16:31:51,029 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-11T16:31:51,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-11T16:31:51,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-11T16:31:51,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-11T16:31:51,031 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-11T16:31:51,031 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731342711029Disabling compacts and flushes for region at 1731342711029Disabling writes for close at 1731342711029Writing region close event to WAL at 1731342711030 (+1 ms)Closed at 1731342711030
2024-11-11T16:31:51,034 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta
2024-11-11T16:31:51,034 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta
2024-11-11T16:31:51,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}]
2024-11-11T16:31:51,056 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN
2024-11-11T16:31:51,069 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false
2024-11-11T16:31:51,073 INFO [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,45041,1731342708831 with port=37705, startcode=1731342709786
2024-11-11T16:31:51,073 INFO [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,45041,1731342708831 with port=34889, startcode=1731342709618
2024-11-11T16:31:51,075 INFO [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,45041,1731342708831 with port=42593, startcode=1731342709733
2024-11-11T16:31:51,076 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45041 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b413a53992,37705,1731342709786
2024-11-11T16:31:51,079 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45041 {}] master.ServerManager(517): Registering regionserver=16b413a53992,37705,1731342709786
2024-11-11T16:31:51,088 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45041 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b413a53992,34889,1731342709618
2024-11-11T16:31:51,088 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45041 {}] master.ServerManager(517): Registering regionserver=16b413a53992,34889,1731342709618
2024-11-11T16:31:51,088 DEBUG [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571
2024-11-11T16:31:51,088 DEBUG [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37187
2024-11-11T16:31:51,089 DEBUG [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-11T16:31:51,094 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-11T16:31:51,095 DEBUG [RS:2;16b413a53992:37705 {}] zookeeper.ZKUtil(111): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b413a53992,37705,1731342709786
2024-11-11T16:31:51,095 WARN [RS:2;16b413a53992:37705 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-11T16:31:51,096 INFO [RS:2;16b413a53992:37705 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-11T16:31:51,096 DEBUG [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,37705,1731342709786
2024-11-11T16:31:51,101 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45041 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b413a53992,42593,1731342709733
2024-11-11T16:31:51,101 DEBUG [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571
2024-11-11T16:31:51,101 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45041 {}] master.ServerManager(517): Registering regionserver=16b413a53992,42593,1731342709733
2024-11-11T16:31:51,101 DEBUG [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37187
2024-11-11T16:31:51,101 DEBUG [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-11T16:31:51,106 DEBUG [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571
2024-11-11T16:31:51,106 DEBUG [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37187
2024-11-11T16:31:51,106 DEBUG [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1
2024-11-11T16:31:51,109 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b413a53992,37705,1731342709786]
2024-11-11T16:31:51,110 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-11T16:31:51,112 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b413a53992,42593,1731342709733]
2024-11-11T16:31:51,112 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b413a53992,34889,1731342709618]
2024-11-11T16:31:51,113 DEBUG [RS:1;16b413a53992:42593 {}] zookeeper.ZKUtil(111): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b413a53992,42593,1731342709733
2024-11-11T16:31:51,113 WARN [RS:1;16b413a53992:42593 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-11T16:31:51,113 INFO [RS:1;16b413a53992:42593 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-11T16:31:51,113 DEBUG [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,42593,1731342709733
2024-11-11T16:31:51,115 DEBUG [RS:0;16b413a53992:34889 {}] zookeeper.ZKUtil(111): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b413a53992,34889,1731342709618
2024-11-11T16:31:51,115 WARN [RS:0;16b413a53992:34889 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!)
2024-11-11T16:31:51,115 INFO [RS:0;16b413a53992:34889 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-11T16:31:51,115 DEBUG [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,34889,1731342709618
2024-11-11T16:31:51,142 INFO [RS:2;16b413a53992:37705 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-11T16:31:51,142 INFO [RS:0;16b413a53992:34889 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-11T16:31:51,144 INFO [RS:1;16b413a53992:42593 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds
2024-11-11T16:31:51,159 INFO [RS:2;16b413a53992:37705 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-11T16:31:51,160 INFO [RS:0;16b413a53992:34889 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-11T16:31:51,161 INFO [RS:1;16b413a53992:42593 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false
2024-11-11T16:31:51,165 INFO [RS:0;16b413a53992:34889 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-11T16:31:51,165 INFO [RS:2;16b413a53992:37705 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-11T16:31:51,165 INFO [RS:1;16b413a53992:42593 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-11T16:31:51,165 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:51,165 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:51,165 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:51,166 INFO [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-11T16:31:51,166 INFO [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-11T16:31:51,166 INFO [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S
2024-11-11T16:31:51,173 INFO [RS:1;16b413a53992:42593 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-11T16:31:51,173 INFO [RS:0;16b413a53992:34889 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-11T16:31:51,173 INFO [RS:2;16b413a53992:37705 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec
2024-11-11T16:31:51,175 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:51,175 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:51,175 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:51,175 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,175 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,175 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,175 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,175 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,175 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,175 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,175 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0, corePoolSize=2, maxPoolSize=2
2024-11-11T16:31:51,176 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0, corePoolSize=2, maxPoolSize=2
2024-11-11T16:31:51,176 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0, corePoolSize=2, maxPoolSize=2
2024-11-11T16:31:51,176 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,176 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,177 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3
2024-11-11T16:31:51,177 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3
2024-11-11T16:31:51,178 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1
2024-11-11T16:31:51,178 DEBUG [RS:0;16b413a53992:34889 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3
2024-11-11T16:31:51,178 DEBUG [RS:1;16b413a53992:42593 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3
2024-11-11T16:31:51,178 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3
2024-11-11T16:31:51,178 DEBUG [RS:2;16b413a53992:37705 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3
2024-11-11T16:31:51,180 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:51,180 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:51,180 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:51,180 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled.
2024-11-11T16:31:51,180 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,180 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,180 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,180 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,180 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,180 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,180 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,180 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,180 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,180 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,42593,1731342709733-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T16:31:51,180 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,34889,1731342709618-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T16:31:51,180 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,181 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,181 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,37705,1731342709786-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T16:31:51,203 INFO [RS:2;16b413a53992:37705 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T16:31:51,205 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,37705,1731342709786-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,206 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T16:31:51,206 INFO [RS:2;16b413a53992:37705 {}] regionserver.Replication(171): 16b413a53992,37705,1731342709786 started 2024-11-11T16:31:51,207 INFO [RS:1;16b413a53992:42593 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T16:31:51,207 INFO [RS:0;16b413a53992:34889 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T16:31:51,208 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,42593,1731342709733-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,208 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,34889,1731342709618-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,208 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,208 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,208 INFO [RS:1;16b413a53992:42593 {}] regionserver.Replication(171): 16b413a53992,42593,1731342709733 started 2024-11-11T16:31:51,208 INFO [RS:0;16b413a53992:34889 {}] regionserver.Replication(171): 16b413a53992,34889,1731342709618 started 2024-11-11T16:31:51,221 WARN [16b413a53992:45041 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 2024-11-11T16:31:51,227 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,227 INFO [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(1482): Serving as 16b413a53992,37705,1731342709786, RpcServer on 16b413a53992/172.17.0.2:37705, sessionid=0x1002faf37cd0003 2024-11-11T16:31:51,228 DEBUG [RS:2;16b413a53992:37705 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T16:31:51,228 DEBUG [RS:2;16b413a53992:37705 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b413a53992,37705,1731342709786 2024-11-11T16:31:51,228 DEBUG [RS:2;16b413a53992:37705 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,37705,1731342709786' 2024-11-11T16:31:51,228 DEBUG [RS:2;16b413a53992:37705 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T16:31:51,230 DEBUG [RS:2;16b413a53992:37705 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T16:31:51,230 DEBUG [RS:2;16b413a53992:37705 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T16:31:51,230 DEBUG [RS:2;16b413a53992:37705 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T16:31:51,230 DEBUG [RS:2;16b413a53992:37705 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b413a53992,37705,1731342709786 2024-11-11T16:31:51,230 DEBUG [RS:2;16b413a53992:37705 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,37705,1731342709786' 2024-11-11T16:31:51,230 DEBUG [RS:2;16b413a53992:37705 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 
2024-11-11T16:31:51,231 DEBUG [RS:2;16b413a53992:37705 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T16:31:51,232 DEBUG [RS:2;16b413a53992:37705 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T16:31:51,232 INFO [RS:2;16b413a53992:37705 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T16:31:51,232 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,232 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:51,232 INFO [RS:2;16b413a53992:37705 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T16:31:51,232 INFO [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(1482): Serving as 16b413a53992,34889,1731342709618, RpcServer on 16b413a53992/172.17.0.2:34889, sessionid=0x1002faf37cd0001 2024-11-11T16:31:51,232 INFO [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(1482): Serving as 16b413a53992,42593,1731342709733, RpcServer on 16b413a53992/172.17.0.2:42593, sessionid=0x1002faf37cd0002 2024-11-11T16:31:51,232 DEBUG [RS:1;16b413a53992:42593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T16:31:51,232 DEBUG [RS:0;16b413a53992:34889 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T16:31:51,232 DEBUG [RS:0;16b413a53992:34889 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b413a53992,34889,1731342709618 2024-11-11T16:31:51,232 DEBUG [RS:1;16b413a53992:42593 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b413a53992,42593,1731342709733 2024-11-11T16:31:51,233 DEBUG [RS:1;16b413a53992:42593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,42593,1731342709733' 2024-11-11T16:31:51,233 DEBUG [RS:0;16b413a53992:34889 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,34889,1731342709618' 2024-11-11T16:31:51,233 DEBUG [RS:1;16b413a53992:42593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T16:31:51,233 DEBUG [RS:0;16b413a53992:34889 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T16:31:51,233 DEBUG [RS:0;16b413a53992:34889 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T16:31:51,233 DEBUG [RS:1;16b413a53992:42593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T16:31:51,234 DEBUG [RS:0;16b413a53992:34889 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T16:31:51,234 DEBUG [RS:1;16b413a53992:42593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T16:31:51,234 DEBUG [RS:0;16b413a53992:34889 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T16:31:51,234 DEBUG [RS:0;16b413a53992:34889 {}] 
snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b413a53992,34889,1731342709618 2024-11-11T16:31:51,234 DEBUG [RS:1;16b413a53992:42593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T16:31:51,234 DEBUG [RS:0;16b413a53992:34889 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,34889,1731342709618' 2024-11-11T16:31:51,234 DEBUG [RS:0;16b413a53992:34889 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T16:31:51,234 DEBUG [RS:1;16b413a53992:42593 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b413a53992,42593,1731342709733 2024-11-11T16:31:51,234 DEBUG [RS:1;16b413a53992:42593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,42593,1731342709733' 2024-11-11T16:31:51,234 DEBUG [RS:1;16b413a53992:42593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T16:31:51,235 DEBUG [RS:0;16b413a53992:34889 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T16:31:51,235 DEBUG [RS:1;16b413a53992:42593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T16:31:51,236 DEBUG [RS:0;16b413a53992:34889 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T16:31:51,236 INFO [RS:0;16b413a53992:34889 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T16:31:51,236 INFO [RS:0;16b413a53992:34889 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T16:31:51,236 DEBUG [RS:1;16b413a53992:42593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T16:31:51,236 INFO [RS:1;16b413a53992:42593 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T16:31:51,236 INFO [RS:1;16b413a53992:42593 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-11T16:31:51,338 INFO [RS:1;16b413a53992:42593 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:31:51,338 INFO [RS:2;16b413a53992:37705 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:31:51,338 INFO [RS:0;16b413a53992:34889 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T16:31:51,342 INFO [RS:2;16b413a53992:37705 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C37705%2C1731342709786, suffix=, logDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,37705,1731342709786, archiveDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/oldWALs, maxLogs=32 2024-11-11T16:31:51,342 INFO [RS:0;16b413a53992:34889 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C34889%2C1731342709618, suffix=, logDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,34889,1731342709618, archiveDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/oldWALs, maxLogs=32 2024-11-11T16:31:51,342 INFO [RS:1;16b413a53992:42593 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C42593%2C1731342709733, suffix=, logDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,42593,1731342709733, archiveDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/oldWALs, maxLogs=32 2024-11-11T16:31:51,367 DEBUG [RS:1;16b413a53992:42593 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,42593,1731342709733/16b413a53992%2C42593%2C1731342709733.1731342711351, exclude list is [], retry=0 2024-11-11T16:31:51,371 DEBUG [RS:2;16b413a53992:37705 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,37705,1731342709786/16b413a53992%2C37705%2C1731342709786.1731342711350, exclude list is [], retry=0 2024-11-11T16:31:51,371 DEBUG [RS:0;16b413a53992:34889 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,34889,1731342709618/16b413a53992%2C34889%2C1731342709618.1731342711349, exclude list is [], retry=0 2024-11-11T16:31:51,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44767,DS-667085a3-27ed-4c40-b34e-ca8b04a145ec,DISK] 2024-11-11T16:31:51,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41631,DS-53f905b2-b558-49ef-8b6d-b7ed5a6fd765,DISK] 2024-11-11T16:31:51,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:45373,DS-90e944c2-a98c-4f29-a1ca-8ab940d0724e,DISK] 2024-11-11T16:31:51,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45373,DS-90e944c2-a98c-4f29-a1ca-8ab940d0724e,DISK] 2024-11-11T16:31:51,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45373,DS-90e944c2-a98c-4f29-a1ca-8ab940d0724e,DISK] 2024-11-11T16:31:51,378 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41631,DS-53f905b2-b558-49ef-8b6d-b7ed5a6fd765,DISK] 2024-11-11T16:31:51,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41631,DS-53f905b2-b558-49ef-8b6d-b7ed5a6fd765,DISK] 2024-11-11T16:31:51,379 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44767,DS-667085a3-27ed-4c40-b34e-ca8b04a145ec,DISK] 2024-11-11T16:31:51,380 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44767,DS-667085a3-27ed-4c40-b34e-ca8b04a145ec,DISK] 2024-11-11T16:31:51,462 INFO [RS:1;16b413a53992:42593 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,42593,1731342709733/16b413a53992%2C42593%2C1731342709733.1731342711351 2024-11-11T16:31:51,463 DEBUG [RS:1;16b413a53992:42593 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:45699:45699),(127.0.0.1/127.0.0.1:33429:33429),(127.0.0.1/127.0.0.1:40731:40731)] 2024-11-11T16:31:51,465 INFO [RS:2;16b413a53992:37705 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,37705,1731342709786/16b413a53992%2C37705%2C1731342709786.1731342711350 2024-11-11T16:31:51,465 DEBUG [RS:2;16b413a53992:37705 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40731:40731),(127.0.0.1/127.0.0.1:45699:45699),(127.0.0.1/127.0.0.1:33429:33429)] 2024-11-11T16:31:51,466 INFO [RS:0;16b413a53992:34889 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,34889,1731342709618/16b413a53992%2C34889%2C1731342709618.1731342711349 2024-11-11T16:31:51,466 DEBUG [RS:0;16b413a53992:34889 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40731:40731),(127.0.0.1/127.0.0.1:45699:45699),(127.0.0.1/127.0.0.1:33429:33429)] 2024-11-11T16:31:51,724 DEBUG [16b413a53992:45041 {}] assignment.AssignmentManager(2464): Processing 
assignQueue; systemServersCount=3, allServersCount=3 2024-11-11T16:31:51,733 DEBUG [16b413a53992:45041 {}] balancer.BalancerClusterState(204): Hosts are {16b413a53992=0} racks are {/default-rack=0} 2024-11-11T16:31:51,740 DEBUG [16b413a53992:45041 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T16:31:51,740 DEBUG [16b413a53992:45041 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T16:31:51,740 DEBUG [16b413a53992:45041 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-11T16:31:51,740 DEBUG [16b413a53992:45041 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T16:31:51,740 DEBUG [16b413a53992:45041 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T16:31:51,740 DEBUG [16b413a53992:45041 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-11T16:31:51,740 INFO [16b413a53992:45041 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T16:31:51,740 INFO [16b413a53992:45041 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T16:31:51,740 INFO [16b413a53992:45041 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-11T16:31:51,740 DEBUG [16b413a53992:45041 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T16:31:51,749 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=16b413a53992,34889,1731342709618 2024-11-11T16:31:51,758 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b413a53992,34889,1731342709618, state=OPENING 2024-11-11T16:31:51,765 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T16:31:51,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:51,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:51,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:51,768 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:51,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:51,768 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:51,769 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:51,769 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:51,771 DEBUG 
[PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T16:31:51,773 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=16b413a53992,34889,1731342709618}] 2024-11-11T16:31:51,951 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T16:31:51,954 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52317, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T16:31:51,974 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T16:31:51,974 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T16:31:51,975 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-11T16:31:51,979 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C34889%2C1731342709618.meta, suffix=.meta, logDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,34889,1731342709618, archiveDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/oldWALs, maxLogs=32 2024-11-11T16:31:52,004 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,34889,1731342709618/16b413a53992%2C34889%2C1731342709618.meta.1731342711982.meta, exclude list is [], retry=0 2024-11-11T16:31:52,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45373,DS-90e944c2-a98c-4f29-a1ca-8ab940d0724e,DISK] 2024-11-11T16:31:52,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:41631,DS-53f905b2-b558-49ef-8b6d-b7ed5a6fd765,DISK] 2024-11-11T16:31:52,009 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44767,DS-667085a3-27ed-4c40-b34e-ca8b04a145ec,DISK] 2024-11-11T16:31:52,014 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL 
/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/WALs/16b413a53992,34889,1731342709618/16b413a53992%2C34889%2C1731342709618.meta.1731342711982.meta 2024-11-11T16:31:52,014 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:40731:40731),(127.0.0.1/127.0.0.1:45699:45699),(127.0.0.1/127.0.0.1:33429:33429)] 2024-11-11T16:31:52,014 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:31:52,017 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T16:31:52,020 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T16:31:52,027 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-11T16:31:52,032 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T16:31:52,033 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:31:52,033 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T16:31:52,033 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T16:31:52,037 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T16:31:52,039 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T16:31:52,039 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:52,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:52,040 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T16:31:52,042 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T16:31:52,042 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:52,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:52,043 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T16:31:52,045 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T16:31:52,045 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:52,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:52,046 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T16:31:52,048 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T16:31:52,048 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:52,049 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:52,049 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T16:31:52,051 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740 2024-11-11T16:31:52,054 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740 2024-11-11T16:31:52,057 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T16:31:52,057 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T16:31:52,059 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-11T16:31:52,062 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T16:31:52,064 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60599004, jitterRate=-0.0970044732093811}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T16:31:52,065 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T16:31:52,066 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731342712034Writing region info on filesystem at 1731342712034Initializing all the Stores at 1731342712036 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342712037 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342712037Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342712037Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342712037Cleaning up temporary data from old regions at 1731342712058 (+21 ms)Running coprocessor post-open hooks at 1731342712065 (+7 ms)Region opened successfully at 1731342712066 (+1 ms) 2024-11-11T16:31:52,077 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731342711941 2024-11-11T16:31:52,092 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T16:31:52,093 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T16:31:52,097 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=16b413a53992,34889,1731342709618 2024-11-11T16:31:52,100 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b413a53992,34889,1731342709618, state=OPEN 2024-11-11T16:31:52,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:31:52,104 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:52,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:31:52,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:31:52,105 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:52,105 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:31:52,105 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=16b413a53992,34889,1731342709618 2024-11-11T16:31:52,105 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:52,108 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:52,114 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T16:31:52,115 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=16b413a53992,34889,1731342709618 in 332 msec 2024-11-11T16:31:52,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T16:31:52,127 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0740 sec 2024-11-11T16:31:52,129 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T16:31:52,129 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T16:31:52,153 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T16:31:52,154 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=16b413a53992,34889,1731342709618, seqNum=-1] 2024-11-11T16:31:52,184 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T16:31:52,187 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35845, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T16:31:52,252 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4330 sec 2024-11-11T16:31:52,252 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731342712252, completionTime=-1 2024-11-11T16:31:52,256 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-11T16:31:52,256 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-11T16:31:52,291 INFO [master/16b413a53992:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-11T16:31:52,292 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731342772292 2024-11-11T16:31:52,292 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731342832292 2024-11-11T16:31:52,292 INFO [master/16b413a53992:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 35 msec 2024-11-11T16:31:52,294 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-11T16:31:52,302 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,45041,1731342708831-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:52,303 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,45041,1731342708831-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:52,303 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,45041,1731342708831-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:52,305 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-16b413a53992:45041, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:52,305 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:52,306 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-11-11T16:31:52,316 DEBUG [master/16b413a53992:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T16:31:52,342 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.466sec 2024-11-11T16:31:52,345 INFO [master/16b413a53992:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T16:31:52,346 INFO [master/16b413a53992:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T16:31:52,348 INFO [master/16b413a53992:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T16:31:52,348 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T16:31:52,349 INFO [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T16:31:52,350 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,45041,1731342708831-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T16:31:52,350 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,45041,1731342708831-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T16:31:52,358 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T16:31:52,360 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T16:31:52,360 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,45041,1731342708831-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T16:31:52,439 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b92a314, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T16:31:52,445 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-11T16:31:52,445 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-11T16:31:52,450 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 16b413a53992,45041,-1 for getting cluster id 2024-11-11T16:31:52,454 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T16:31:52,464 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '77dc20a1-7a9e-4e31-96f8-0c6460b561fe' 2024-11-11T16:31:52,467 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T16:31:52,467 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "77dc20a1-7a9e-4e31-96f8-0c6460b561fe" 2024-11-11T16:31:52,468 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d402895, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T16:31:52,468 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [16b413a53992,45041,-1] 2024-11-11T16:31:52,472 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T16:31:52,474 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:52,476 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46600, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T16:31:52,479 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb2644a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T16:31:52,480 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T16:31:52,488 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b413a53992,34889,1731342709618, seqNum=-1] 2024-11-11T16:31:52,489 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T16:31:52,492 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55502, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T16:31:52,521 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=16b413a53992,45041,1731342708831 2024-11-11T16:31:52,527 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T16:31:52,533 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 16b413a53992,45041,1731342708831 2024-11-11T16:31:52,536 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7661c0ce 2024-11-11T16:31:52,538 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T16:31:52,543 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46614, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T16:31:52,550 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T16:31:52,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-11T16:31:52,571 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T16:31:52,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-11T16:31:52,577 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:52,582 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T16:31:52,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:31:52,607 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:52,607 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-11T16:31:52,630 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:42992 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:45373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42992 dst: /127.0.0.1:45373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:52,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_-9223372036854775680_1021 (size=392) 2024-11-11T16:31:52,662 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T16:31:52,667 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 73d653654492e17a7a53fd4d0c705f9d, NAME => 'TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571 2024-11-11T16:31:52,677 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:52,677 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-11T16:31:52,687 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:54850 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:41631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54850 dst: /127.0.0.1:41631 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:52,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775664_1023 (size=51) 2024-11-11T16:31:52,697 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T16:31:52,698 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:31:52,698 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 73d653654492e17a7a53fd4d0c705f9d, disabling compactions & flushes 2024-11-11T16:31:52,698 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 2024-11-11T16:31:52,698 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 2024-11-11T16:31:52,698 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. after waiting 0 ms 2024-11-11T16:31:52,698 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 2024-11-11T16:31:52,698 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 
2024-11-11T16:31:52,698 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 73d653654492e17a7a53fd4d0c705f9d: Waiting for close lock at 1731342712698Disabling compacts and flushes for region at 1731342712698Disabling writes for close at 1731342712698Writing region close event to WAL at 1731342712698Closed at 1731342712698 2024-11-11T16:31:52,701 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T16:31:52,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:31:52,707 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731342712701"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731342712701"}]},"ts":"1731342712701"} 2024-11-11T16:31:52,715 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-11T16:31:52,721 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T16:31:52,724 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731342712721"}]},"ts":"1731342712721"} 2024-11-11T16:31:52,733 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-11T16:31:52,733 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {16b413a53992=0} racks are {/default-rack=0} 2024-11-11T16:31:52,735 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T16:31:52,735 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T16:31:52,735 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-11T16:31:52,735 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T16:31:52,736 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T16:31:52,736 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-11T16:31:52,736 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T16:31:52,736 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T16:31:52,736 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-11T16:31:52,736 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T16:31:52,738 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=73d653654492e17a7a53fd4d0c705f9d, ASSIGN}] 2024-11-11T16:31:52,742 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=TestHBaseWalOnEC, region=73d653654492e17a7a53fd4d0c705f9d, ASSIGN 2024-11-11T16:31:52,745 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=73d653654492e17a7a53fd4d0c705f9d, ASSIGN; state=OFFLINE, location=16b413a53992,34889,1731342709618; forceNewPlan=false, retain=false 2024-11-11T16:31:52,898 INFO [16b413a53992:45041 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-11T16:31:52,899 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=73d653654492e17a7a53fd4d0c705f9d, regionState=OPENING, regionLocation=16b413a53992,34889,1731342709618 2024-11-11T16:31:52,904 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=73d653654492e17a7a53fd4d0c705f9d, ASSIGN because future has completed 2024-11-11T16:31:52,905 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 73d653654492e17a7a53fd4d0c705f9d, server=16b413a53992,34889,1731342709618}] 2024-11-11T16:31:52,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:31:53,072 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 
2024-11-11T16:31:53,072 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 73d653654492e17a7a53fd4d0c705f9d, NAME => 'TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:31:53,073 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,073 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:31:53,073 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,073 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,076 INFO [StoreOpener-73d653654492e17a7a53fd4d0c705f9d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,079 INFO [StoreOpener-73d653654492e17a7a53fd4d0c705f9d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 73d653654492e17a7a53fd4d0c705f9d columnFamilyName cf 2024-11-11T16:31:53,080 DEBUG [StoreOpener-73d653654492e17a7a53fd4d0c705f9d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:53,081 INFO [StoreOpener-73d653654492e17a7a53fd4d0c705f9d-1 {}] regionserver.HStore(327): Store=73d653654492e17a7a53fd4d0c705f9d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:31:53,081 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,082 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/default/TestHBaseWalOnEC/73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,083 DEBUG 
[RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/default/TestHBaseWalOnEC/73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,084 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,084 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,090 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,100 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/default/TestHBaseWalOnEC/73d653654492e17a7a53fd4d0c705f9d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:31:53,101 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 73d653654492e17a7a53fd4d0c705f9d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67309882, jitterRate=0.002995401620864868}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T16:31:53,101 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,103 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 73d653654492e17a7a53fd4d0c705f9d: Running coprocessor pre-open hook at 1731342713073Writing region info on filesystem at 1731342713073Initializing all the Stores at 1731342713075 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342713075Cleaning up temporary data from old regions at 1731342713084 (+9 ms)Running coprocessor post-open hooks at 1731342713101 (+17 ms)Region opened successfully at 1731342713102 (+1 ms) 2024-11-11T16:31:53,109 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d., pid=6, masterSystemTime=1731342713064 2024-11-11T16:31:53,114 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 2024-11-11T16:31:53,114 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 
2024-11-11T16:31:53,120 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=73d653654492e17a7a53fd4d0c705f9d, regionState=OPEN, openSeqNum=2, regionLocation=16b413a53992,34889,1731342709618 2024-11-11T16:31:53,126 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 73d653654492e17a7a53fd4d0c705f9d, server=16b413a53992,34889,1731342709618 because future has completed 2024-11-11T16:31:53,132 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T16:31:53,132 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 73d653654492e17a7a53fd4d0c705f9d, server=16b413a53992,34889,1731342709618 in 223 msec 2024-11-11T16:31:53,136 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T16:31:53,137 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=73d653654492e17a7a53fd4d0c705f9d, ASSIGN in 394 msec 2024-11-11T16:31:53,138 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T16:31:53,139 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731342713138"}]},"ts":"1731342713138"} 2024-11-11T16:31:53,142 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-11T16:31:53,143 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T16:31:53,146 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 589 msec 2024-11-11T16:31:53,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:31:53,228 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-11T16:31:53,228 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-11T16:31:53,229 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T16:31:53,235 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-11T16:31:53,236 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T16:31:53,236 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 
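[Editor's note, not part of the captured log] The CREATE operation recorded above (table TestHBaseWalOnEC, a single 'cf' family, REGION_REPLICATION => '1') corresponds to a client call along the following lines. This is a hedged sketch using the public HBase client API, not the actual test code; table and family names are taken from this log, the connection handling and class name are illustrative.

// Sketch of the kind of client call behind the CREATE procedure above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName name = TableName.valueOf("TestHBaseWalOnEC");
      admin.createTable(TableDescriptorBuilder.newBuilder(name)
          .setRegionReplication(1)                                  // REGION_REPLICATION => '1'
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))  // NAME => 'cf', defaults otherwise
          .build());
    }
  }
}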
2024-11-11T16:31:53,245 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d., hostname=16b413a53992,34889,1731342709618, seqNum=2] 2024-11-11T16:31:53,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-11T16:31:53,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-11T16:31:53,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T16:31:53,265 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-11T16:31:53,268 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T16:31:53,269 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T16:31:53,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T16:31:53,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=34889 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-11T16:31:53,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 
2024-11-11T16:31:53,446 INFO [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 73d653654492e17a7a53fd4d0c705f9d 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-11T16:31:53,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-11T16:31:53,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-11T16:31:53,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-11T16:31:53,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-11T16:31:53,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-11T16:31:53,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775692_1015 (size=32) 2024-11-11T16:31:53,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-11T16:31:53,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to blk_-9223372036854775693_1015 (size=32) 2024-11-11T16:31:53,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/default/TestHBaseWalOnEC/73d653654492e17a7a53fd4d0c705f9d/.tmp/cf/6e95d4a21d6947428d6c9827d9bc32bd is 36, key is row/cf:cq/1731342713248/Put/seqid=0 2024-11-11T16:31:53,529 WARN [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:53,529 WARN [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-11T16:31:53,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775709_1013 (size=1321) 2024-11-11T16:31:53,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_-9223372036854775708_1013 (size=1321) 2024-11-11T16:31:53,555 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2125861184_22 at /127.0.0.1:43048 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:45373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43048 dst: /127.0.0.1:45373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:53,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_-9223372036854775648_1025 (size=4787) 2024-11-11T16:31:53,569 WARN [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-11T16:31:53,570 INFO [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/default/TestHBaseWalOnEC/73d653654492e17a7a53fd4d0c705f9d/.tmp/cf/6e95d4a21d6947428d6c9827d9bc32bd 2024-11-11T16:31:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T16:31:53,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/default/TestHBaseWalOnEC/73d653654492e17a7a53fd4d0c705f9d/.tmp/cf/6e95d4a21d6947428d6c9827d9bc32bd as hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/default/TestHBaseWalOnEC/73d653654492e17a7a53fd4d0c705f9d/cf/6e95d4a21d6947428d6c9827d9bc32bd 2024-11-11T16:31:53,645 INFO [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/default/TestHBaseWalOnEC/73d653654492e17a7a53fd4d0c705f9d/cf/6e95d4a21d6947428d6c9827d9bc32bd, entries=1, sequenceid=5, filesize=4.7 K 2024-11-11T16:31:53,658 INFO [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 73d653654492e17a7a53fd4d0c705f9d in 212ms, sequenceid=5, compaction requested=false 2024-11-11T16:31:53,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-11T16:31:53,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 73d653654492e17a7a53fd4d0c705f9d: 2024-11-11T16:31:53,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 
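[Editor's note, not part of the captured log] The flush sequence above (a 32 B memstore holding the single cell row/cf:cq, flushed through FlushTableProcedure pid=7/8) is the result of a put followed by an explicit admin flush. A sketch under those assumptions is shown below; the row, family, qualifier, and table name come from this log, while the value bytes, connection handling, and class name are illustrative.

// Sketch of a put followed by an explicit flush, matching the single cell
// row/cf:cq and the FLUSH procedure recorded above. Not the actual test code.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(name);
         Admin admin = conn.getAdmin()) {
      // One small cell: the log above reports "key is row/cf:cq" for the flushed HFile.
      table.put(new Put(Bytes.toBytes("row"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value")));
      admin.flush(name); // triggers the FlushTableProcedure seen in this log
    }
  }
}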
2024-11-11T16:31:53,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-11T16:31:53,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-11T16:31:53,687 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-11T16:31:53,687 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 404 msec 2024-11-11T16:31:53,709 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 434 msec 2024-11-11T16:31:53,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45041 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T16:31:53,897 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-11T16:31:53,914 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T16:31:53,915 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T16:31:53,915 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:31:53,920 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:53,920 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:53,920 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T16:31:53,921 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T16:31:53,921 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1441288993, stopped=false 2024-11-11T16:31:53,921 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=16b413a53992,45041,1731342708831 2024-11-11T16:31:53,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:53,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:53,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:53,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:53,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:53,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, 
quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:53,923 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T16:31:53,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:53,924 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:53,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:53,925 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-11T16:31:53,926 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:53,926 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:53,926 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:53,926 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at 
org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:31:53,926 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:53,926 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b413a53992,34889,1731342709618' ***** 2024-11-11T16:31:53,926 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T16:31:53,927 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b413a53992,42593,1731342709733' ***** 2024-11-11T16:31:53,927 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T16:31:53,927 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b413a53992,37705,1731342709786' ***** 2024-11-11T16:31:53,927 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T16:31:53,927 INFO [RS:1;16b413a53992:42593 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T16:31:53,927 INFO [RS:2;16b413a53992:37705 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T16:31:53,927 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T16:31:53,927 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T16:31:53,927 INFO [RS:2;16b413a53992:37705 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
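[Editor's note, not part of the captured log] The shutdown call stacks above name the teardown path TestHBaseWalOnEC.tearDown -> HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that shape is shown below; the UTIL field, the class name, and the use of @AfterClass are assumptions for illustration, only the utility class and method come from this log.

// Sketch of the teardown path named in the call stacks above.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.AfterClass;

public class TeardownSketch {
  private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

  @AfterClass
  public static void tearDown() throws Exception {
    // Stops the mini HBase cluster and its backing mini DFS/ZooKeeper,
    // producing the "Shutting down minicluster" sequence recorded in this log.
    UTIL.shutdownMiniCluster();
  }
}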
2024-11-11T16:31:53,927 INFO [RS:1;16b413a53992:42593 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T16:31:53,928 INFO [RS:1;16b413a53992:42593 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T16:31:53,928 INFO [RS:2;16b413a53992:37705 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T16:31:53,928 INFO [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(959): stopping server 16b413a53992,42593,1731342709733 2024-11-11T16:31:53,928 INFO [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(959): stopping server 16b413a53992,37705,1731342709786 2024-11-11T16:31:53,928 INFO [RS:1;16b413a53992:42593 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:31:53,928 INFO [RS:2;16b413a53992:37705 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:31:53,928 INFO [RS:1;16b413a53992:42593 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;16b413a53992:42593. 2024-11-11T16:31:53,928 INFO [RS:2;16b413a53992:37705 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;16b413a53992:37705. 2024-11-11T16:31:53,928 DEBUG [RS:1;16b413a53992:42593 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:31:53,928 DEBUG [RS:1;16b413a53992:42593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:53,928 DEBUG [RS:2;16b413a53992:37705 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at 
org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:31:53,928 DEBUG [RS:2;16b413a53992:37705 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:53,928 INFO [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(976): stopping server 16b413a53992,42593,1731342709733; all regions closed. 2024-11-11T16:31:53,928 INFO [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(976): stopping server 16b413a53992,37705,1731342709786; all regions closed. 2024-11-11T16:31:53,929 INFO [RS:0;16b413a53992:34889 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T16:31:53,929 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T16:31:53,929 INFO [RS:0;16b413a53992:34889 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T16:31:53,929 INFO [RS:0;16b413a53992:34889 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T16:31:53,929 INFO [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(3091): Received CLOSE for 73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,930 INFO [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(959): stopping server 16b413a53992,34889,1731342709618 2024-11-11T16:31:53,930 INFO [RS:0;16b413a53992:34889 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:31:53,930 INFO [RS:0;16b413a53992:34889 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;16b413a53992:34889. 
2024-11-11T16:31:53,930 DEBUG [RS:0;16b413a53992:34889 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:31:53,930 DEBUG [RS:0;16b413a53992:34889 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:53,930 INFO [RS:0;16b413a53992:34889 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T16:31:53,930 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 73d653654492e17a7a53fd4d0c705f9d, disabling compactions & flushes 2024-11-11T16:31:53,930 INFO [RS:0;16b413a53992:34889 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T16:31:53,930 INFO [RS:0;16b413a53992:34889 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T16:31:53,930 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 2024-11-11T16:31:53,931 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 2024-11-11T16:31:53,931 INFO [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T16:31:53,931 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. after waiting 0 ms 2024-11-11T16:31:53,931 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 
2024-11-11T16:31:53,933 INFO [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-11T16:31:53,933 DEBUG [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 73d653654492e17a7a53fd4d0c705f9d=TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d.} 2024-11-11T16:31:53,933 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T16:31:53,933 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T16:31:53,933 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T16:31:53,933 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T16:31:53,933 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T16:31:53,933 DEBUG [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 73d653654492e17a7a53fd4d0c705f9d 2024-11-11T16:31:53,933 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-11T16:31:53,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to blk_1073741826_1016 (size=93) 2024-11-11T16:31:53,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_1073741826_1016 (size=93) 2024-11-11T16:31:53,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741826_1016 (size=93) 2024-11-11T16:31:53,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_1073741827_1017 (size=93) 2024-11-11T16:31:53,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741827_1017 (size=93) 2024-11-11T16:31:53,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to blk_1073741827_1017 (size=93) 2024-11-11T16:31:53,952 DEBUG [RS:1;16b413a53992:42593 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/oldWALs 2024-11-11T16:31:53,953 INFO [RS:1;16b413a53992:42593 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 16b413a53992%2C42593%2C1731342709733:(num 1731342711351) 2024-11-11T16:31:53,953 DEBUG [RS:1;16b413a53992:42593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:53,953 INFO [RS:1;16b413a53992:42593 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:53,953 INFO [RS:1;16b413a53992:42593 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:31:53,953 INFO [RS:1;16b413a53992:42593 {}] hbase.ChoreService(370): Chore service for: regionserver/16b413a53992:0 had [ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T16:31:53,954 INFO [regionserver/16b413a53992:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T16:31:53,954 INFO [RS:1;16b413a53992:42593 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T16:31:53,954 INFO [RS:1;16b413a53992:42593 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T16:31:53,954 INFO [RS:1;16b413a53992:42593 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T16:31:53,954 INFO [RS:1;16b413a53992:42593 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:31:53,955 INFO [RS:1;16b413a53992:42593 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42593 2024-11-11T16:31:53,959 DEBUG [RS:2;16b413a53992:37705 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/oldWALs 2024-11-11T16:31:53,959 INFO [RS:2;16b413a53992:37705 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 16b413a53992%2C37705%2C1731342709786:(num 1731342711350) 2024-11-11T16:31:53,959 DEBUG [RS:2;16b413a53992:37705 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:53,959 INFO [RS:2;16b413a53992:37705 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:53,959 INFO [RS:2;16b413a53992:37705 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:31:53,960 INFO [RS:2;16b413a53992:37705 {}] hbase.ChoreService(370): Chore service for: regionserver/16b413a53992:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T16:31:53,960 INFO [RS:2;16b413a53992:37705 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T16:31:53,960 INFO [RS:2;16b413a53992:37705 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T16:31:53,960 INFO [regionserver/16b413a53992:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T16:31:53,960 INFO [RS:2;16b413a53992:37705 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-11T16:31:53,960 INFO [RS:2;16b413a53992:37705 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:31:53,960 INFO [RS:2;16b413a53992:37705 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37705 2024-11-11T16:31:53,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T16:31:53,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b413a53992,42593,1731342709733 2024-11-11T16:31:53,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b413a53992,37705,1731342709786 2024-11-11T16:31:53,964 INFO [RS:2;16b413a53992:37705 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T16:31:53,965 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b413a53992,37705,1731342709786] 2024-11-11T16:31:53,966 INFO [RS:1;16b413a53992:42593 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T16:31:53,968 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b413a53992,37705,1731342709786 already deleted, retry=false 2024-11-11T16:31:53,968 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b413a53992,37705,1731342709786 expired; onlineServers=2 2024-11-11T16:31:53,968 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b413a53992,42593,1731342709733] 2024-11-11T16:31:53,970 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b413a53992,42593,1731342709733 already deleted, retry=false 2024-11-11T16:31:53,970 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b413a53992,42593,1731342709733 expired; onlineServers=1 2024-11-11T16:31:53,974 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/.tmp/info/386c750754b64876bd045bd9608566b3 is 153, key is TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d./info:regioninfo/1731342713120/Put/seqid=0 2024-11-11T16:31:53,977 WARN [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:53,977 WARN [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-11T16:31:53,980 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/default/TestHBaseWalOnEC/73d653654492e17a7a53fd4d0c705f9d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-11T16:31:53,981 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 2024-11-11T16:31:53,981 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 73d653654492e17a7a53fd4d0c705f9d: Waiting for close lock at 1731342713930Running coprocessor pre-close hooks at 1731342713930Disabling compacts and flushes for region at 1731342713930Disabling writes for close at 1731342713931 (+1 ms)Writing region close event to WAL at 1731342713950 (+19 ms)Running coprocessor post-close hooks at 1731342713981 (+31 ms)Closed at 1731342713981 2024-11-11T16:31:53,982 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731342712544.73d653654492e17a7a53fd4d0c705f9d. 2024-11-11T16:31:53,984 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2125861184_22 at /127.0.0.1:43066 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:45373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43066 dst: /127.0.0.1:45373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:53,986 INFO [regionserver/16b413a53992:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:53,986 INFO [regionserver/16b413a53992:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:53,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_-9223372036854775632_1027 (size=6637) 2024-11-11T16:31:53,989 WARN [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. 
It's at high risk of losing data. 2024-11-11T16:31:53,989 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/.tmp/info/386c750754b64876bd045bd9608566b3 2024-11-11T16:31:53,993 INFO [regionserver/16b413a53992:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:54,027 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/.tmp/ns/6f47b193c2a94467bf23d35cd86c0319 is 43, key is default/ns:d/1731342712221/Put/seqid=0 2024-11-11T16:31:54,030 WARN [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:54,031 WARN [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:54,039 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2125861184_22 at /127.0.0.1:54886 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:41631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54886 dst: /127.0.0.1:41631 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T16:31:54,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775616_1029 (size=5153) 2024-11-11T16:31:54,045 WARN [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T16:31:54,045 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/.tmp/ns/6f47b193c2a94467bf23d35cd86c0319 2024-11-11T16:31:54,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:54,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42593-0x1002faf37cd0002, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:54,070 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:54,070 INFO [RS:2;16b413a53992:37705 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T16:31:54,070 INFO [RS:1;16b413a53992:42593 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T16:31:54,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37705-0x1002faf37cd0003, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:54,071 INFO [RS:2;16b413a53992:37705 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b413a53992,37705,1731342709786; zookeeper connection closed. 2024-11-11T16:31:54,071 INFO [RS:1;16b413a53992:42593 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b413a53992,42593,1731342709733; zookeeper connection closed. 2024-11-11T16:31:54,073 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@fefe3c3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@fefe3c3 2024-11-11T16:31:54,078 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7a96f5d0 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7a96f5d0 2024-11-11T16:31:54,096 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/.tmp/table/36c06fdd759445bd9cf90207a6e4935d is 52, key is TestHBaseWalOnEC/table:state/1731342713138/Put/seqid=0 2024-11-11T16:31:54,106 WARN [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-11T16:31:54,106 WARN [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:54,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-2125861184_22 at /127.0.0.1:54922 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:41631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54922 dst: /127.0.0.1:41631 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:54,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775600_1031 (size=5249) 2024-11-11T16:31:54,127 WARN [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-11T16:31:54,128 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/.tmp/table/36c06fdd759445bd9cf90207a6e4935d 2024-11-11T16:31:54,133 DEBUG [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T16:31:54,144 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/.tmp/info/386c750754b64876bd045bd9608566b3 as hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/info/386c750754b64876bd045bd9608566b3 2024-11-11T16:31:54,168 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/info/386c750754b64876bd045bd9608566b3, entries=10, sequenceid=11, filesize=6.5 K 2024-11-11T16:31:54,171 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/.tmp/ns/6f47b193c2a94467bf23d35cd86c0319 as hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/ns/6f47b193c2a94467bf23d35cd86c0319 2024-11-11T16:31:54,181 INFO [regionserver/16b413a53992:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T16:31:54,181 INFO [regionserver/16b413a53992:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T16:31:54,188 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/ns/6f47b193c2a94467bf23d35cd86c0319, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T16:31:54,190 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/.tmp/table/36c06fdd759445bd9cf90207a6e4935d as hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/table/36c06fdd759445bd9cf90207a6e4935d 2024-11-11T16:31:54,202 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/table/36c06fdd759445bd9cf90207a6e4935d, entries=2, sequenceid=11, filesize=5.1 K 2024-11-11T16:31:54,204 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 271ms, sequenceid=11, compaction requested=false 2024-11-11T16:31:54,204 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] 
regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T16:31:54,218 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T16:31:54,219 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T16:31:54,219 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T16:31:54,219 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731342713933Running coprocessor pre-close hooks at 1731342713933Disabling compacts and flushes for region at 1731342713933Disabling writes for close at 1731342713933Obtaining lock to block concurrent updates at 1731342713933Preparing flush snapshotting stores in 1588230740 at 1731342713933Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731342713934 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731342713936 (+2 ms)Flushing 1588230740/info: creating writer at 1731342713936Flushing 1588230740/info: appending metadata at 1731342713970 (+34 ms)Flushing 1588230740/info: closing flushed file at 1731342713970Flushing 1588230740/ns: creating writer at 1731342714001 (+31 ms)Flushing 1588230740/ns: appending metadata at 1731342714026 (+25 ms)Flushing 1588230740/ns: closing flushed file at 1731342714026Flushing 1588230740/table: creating writer at 1731342714057 (+31 ms)Flushing 1588230740/table: appending metadata at 1731342714093 (+36 ms)Flushing 1588230740/table: closing flushed file at 1731342714093Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@628f2b79: reopening flushed file at 1731342714140 (+47 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f7d7a07: reopening flushed file at 1731342714169 (+29 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@8d82a5f: reopening flushed file at 1731342714188 (+19 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 271ms, sequenceid=11, compaction requested=false at 1731342714204 (+16 ms)Writing region close event to WAL at 1731342714209 (+5 ms)Running coprocessor post-close hooks at 1731342714219 (+10 ms)Closed at 1731342714219 2024-11-11T16:31:54,220 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T16:31:54,334 INFO [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(976): stopping server 16b413a53992,34889,1731342709618; all regions closed. 
2024-11-11T16:31:54,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to blk_1073741829_1019 (size=2751) 2024-11-11T16:31:54,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_1073741829_1019 (size=2751) 2024-11-11T16:31:54,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741829_1019 (size=2751) 2024-11-11T16:31:54,342 DEBUG [RS:0;16b413a53992:34889 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/oldWALs 2024-11-11T16:31:54,342 INFO [RS:0;16b413a53992:34889 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 16b413a53992%2C34889%2C1731342709618.meta:.meta(num 1731342711982) 2024-11-11T16:31:54,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741828_1018 (size=1298) 2024-11-11T16:31:54,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to blk_1073741828_1018 (size=1298) 2024-11-11T16:31:54,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_1073741828_1018 (size=1298) 2024-11-11T16:31:54,349 DEBUG [RS:0;16b413a53992:34889 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/oldWALs 2024-11-11T16:31:54,349 INFO [RS:0;16b413a53992:34889 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 16b413a53992%2C34889%2C1731342709618:(num 1731342711349) 2024-11-11T16:31:54,349 DEBUG [RS:0;16b413a53992:34889 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:54,350 INFO [RS:0;16b413a53992:34889 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:54,350 INFO [RS:0;16b413a53992:34889 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:31:54,350 INFO [RS:0;16b413a53992:34889 {}] hbase.ChoreService(370): Chore service for: regionserver/16b413a53992:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T16:31:54,350 INFO [RS:0;16b413a53992:34889 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:31:54,350 INFO [regionserver/16b413a53992:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T16:31:54,350 INFO [RS:0;16b413a53992:34889 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34889 2024-11-11T16:31:54,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b413a53992,34889,1731342709618 2024-11-11T16:31:54,353 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T16:31:54,353 INFO [RS:0;16b413a53992:34889 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T16:31:54,354 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b413a53992,34889,1731342709618] 2024-11-11T16:31:54,356 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b413a53992,34889,1731342709618 already deleted, retry=false 2024-11-11T16:31:54,356 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b413a53992,34889,1731342709618 expired; onlineServers=0 2024-11-11T16:31:54,357 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '16b413a53992,45041,1731342708831' ***** 2024-11-11T16:31:54,357 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T16:31:54,357 INFO [M:0;16b413a53992:45041 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:31:54,357 INFO [M:0;16b413a53992:45041 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:31:54,357 DEBUG [M:0;16b413a53992:45041 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T16:31:54,357 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T16:31:54,357 DEBUG [M:0;16b413a53992:45041 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T16:31:54,357 DEBUG [master/16b413a53992:0:becomeActiveMaster-HFileCleaner.large.0-1731342710968 {}] cleaner.HFileCleaner(306): Exit Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.large.0-1731342710968,5,FailOnTimeoutGroup] 2024-11-11T16:31:54,358 DEBUG [master/16b413a53992:0:becomeActiveMaster-HFileCleaner.small.0-1731342710969 {}] cleaner.HFileCleaner(306): Exit Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.small.0-1731342710969,5,FailOnTimeoutGroup] 2024-11-11T16:31:54,358 INFO [M:0;16b413a53992:45041 {}] hbase.ChoreService(370): Chore service for: master/16b413a53992:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T16:31:54,358 INFO [M:0;16b413a53992:45041 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:31:54,358 DEBUG [M:0;16b413a53992:45041 {}] master.HMaster(1795): Stopping service threads 2024-11-11T16:31:54,358 INFO [M:0;16b413a53992:45041 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T16:31:54,358 INFO [M:0;16b413a53992:45041 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T16:31:54,359 INFO [M:0;16b413a53992:45041 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T16:31:54,359 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T16:31:54,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T16:31:54,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:54,360 DEBUG [M:0;16b413a53992:45041 {}] zookeeper.ZKUtil(347): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T16:31:54,360 WARN [M:0;16b413a53992:45041 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T16:31:54,361 INFO [M:0;16b413a53992:45041 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/.lastflushedseqids 2024-11-11T16:31:54,374 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:54,374 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-11T16:31:54,377 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:54944 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775584_1032] {}] datanode.DataXceiver(331): 127.0.0.1:41631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54944 dst: /127.0.0.1:41631 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:54,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775584_1033 (size=127) 2024-11-11T16:31:54,384 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T16:31:54,384 INFO [M:0;16b413a53992:45041 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T16:31:54,384 INFO [M:0;16b413a53992:45041 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T16:31:54,384 DEBUG [M:0;16b413a53992:45041 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T16:31:54,384 INFO [M:0;16b413a53992:45041 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:54,384 DEBUG [M:0;16b413a53992:45041 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:54,384 DEBUG [M:0;16b413a53992:45041 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T16:31:54,384 DEBUG [M:0;16b413a53992:45041 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T16:31:54,385 INFO [M:0;16b413a53992:45041 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.84 KB heapSize=34.13 KB 2024-11-11T16:31:54,406 DEBUG [M:0;16b413a53992:45041 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bb68645ff4304c83933e4b3de901506b is 82, key is hbase:meta,,1/info:regioninfo/1731342712096/Put/seqid=0 2024-11-11T16:31:54,408 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:54,408 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:54,413 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:54956 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:41631:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54956 dst: /127.0.0.1:41631 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:54,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_-9223372036854775568_1035 (size=5672) 2024-11-11T16:31:54,418 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-11T16:31:54,418 INFO [M:0;16b413a53992:45041 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bb68645ff4304c83933e4b3de901506b 2024-11-11T16:31:54,446 DEBUG [M:0;16b413a53992:45041 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/86027b87b652483e9d19ae6e88816370 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731342713145/Put/seqid=0 2024-11-11T16:31:54,449 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:54,449 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:54,454 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:43076 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:45373:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:43076 dst: /127.0.0.1:45373 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T16:31:54,455 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:54,456 INFO [RS:0;16b413a53992:34889 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T16:31:54,456 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34889-0x1002faf37cd0001, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:54,456 INFO [RS:0;16b413a53992:34889 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b413a53992,34889,1731342709618; zookeeper connection closed. 2024-11-11T16:31:54,457 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3d4da152 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3d4da152 2024-11-11T16:31:54,457 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-11T16:31:54,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_-9223372036854775552_1037 (size=6440) 2024-11-11T16:31:54,460 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T16:31:54,461 INFO [M:0;16b413a53992:45041 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.15 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/86027b87b652483e9d19ae6e88816370 2024-11-11T16:31:54,498 DEBUG [M:0;16b413a53992:45041 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/293e201a98304f2d9dda2a82840e9ba6 is 69, key is 16b413a53992,34889,1731342709618/rs:state/1731342711088/Put/seqid=0 2024-11-11T16:31:54,501 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:54,501 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T16:31:54,504 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1000866338_22 at /127.0.0.1:59846 [Receiving block BP-1543572805-172.17.0.2-1731342705390:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:44767:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59846 dst: /127.0.0.1:44767 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T16:31:54,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to blk_-9223372036854775536_1039 (size=5294) 2024-11-11T16:31:54,514 WARN [M:0;16b413a53992:45041 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T16:31:54,514 INFO [M:0;16b413a53992:45041 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/293e201a98304f2d9dda2a82840e9ba6 2024-11-11T16:31:54,525 DEBUG [M:0;16b413a53992:45041 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/bb68645ff4304c83933e4b3de901506b as hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bb68645ff4304c83933e4b3de901506b 2024-11-11T16:31:54,534 INFO [M:0;16b413a53992:45041 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/bb68645ff4304c83933e4b3de901506b, entries=8, sequenceid=72, filesize=5.5 K 2024-11-11T16:31:54,536 DEBUG [M:0;16b413a53992:45041 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/86027b87b652483e9d19ae6e88816370 as hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/86027b87b652483e9d19ae6e88816370 2024-11-11T16:31:54,545 INFO [M:0;16b413a53992:45041 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/86027b87b652483e9d19ae6e88816370, entries=8, sequenceid=72, filesize=6.3 K 2024-11-11T16:31:54,547 DEBUG [M:0;16b413a53992:45041 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/293e201a98304f2d9dda2a82840e9ba6 as hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/293e201a98304f2d9dda2a82840e9ba6 2024-11-11T16:31:54,557 INFO [M:0;16b413a53992:45041 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/293e201a98304f2d9dda2a82840e9ba6, entries=3, sequenceid=72, filesize=5.2 K 2024-11-11T16:31:54,559 INFO [M:0;16b413a53992:45041 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 175ms, sequenceid=72, compaction requested=false 2024-11-11T16:31:54,561 INFO [M:0;16b413a53992:45041 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:54,561 DEBUG [M:0;16b413a53992:45041 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731342714384Disabling compacts and flushes for region at 1731342714384Disabling writes for close at 1731342714384Obtaining lock to block concurrent updates at 1731342714385 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731342714385Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27480, getHeapSize=34880, getOffHeapSize=0, getCellsCount=85 at 1731342714385Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731342714386 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731342714387 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731342714405 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731342714405Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731342714427 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731342714445 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731342714446 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731342714471 (+25 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731342714497 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731342714497Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3fbea8c: reopening flushed file at 1731342714523 (+26 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7124f23b: reopening flushed file at 1731342714534 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cb82513: reopening flushed file at 1731342714546 (+12 ms)Finished flush of dataSize ~26.84 KB/27480, heapSize ~33.83 KB/34640, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 175ms, sequenceid=72, compaction requested=false at 1731342714559 (+13 ms)Writing region close event to WAL at 1731342714561 (+2 ms)Closed at 1731342714561 2024-11-11T16:31:54,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41631 is added to blk_1073741825_1011 (size=32683) 2024-11-11T16:31:54,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44767 is added to blk_1073741825_1011 (size=32683) 2024-11-11T16:31:54,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45373 is added to blk_1073741825_1011 (size=32683) 2024-11-11T16:31:54,566 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T16:31:54,566 INFO [M:0;16b413a53992:45041 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-11T16:31:54,566 INFO [M:0;16b413a53992:45041 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45041 2024-11-11T16:31:54,566 INFO [M:0;16b413a53992:45041 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T16:31:54,669 INFO [M:0;16b413a53992:45041 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T16:31:54,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:54,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45041-0x1002faf37cd0000, quorum=127.0.0.1:57850, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:54,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f750918{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T16:31:54,676 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@86bf2a7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T16:31:54,676 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T16:31:54,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1023f385{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T16:31:54,676 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7e1f796{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.log.dir/,STOPPED} 2024-11-11T16:31:54,681 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T16:31:54,681 WARN [BP-1543572805-172.17.0.2-1731342705390 heartbeating to localhost/127.0.0.1:37187 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T16:31:54,681 WARN [BP-1543572805-172.17.0.2-1731342705390 heartbeating to localhost/127.0.0.1:37187 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1543572805-172.17.0.2-1731342705390 (Datanode Uuid 5a0763cd-2f13-4b3d-923e-cfb2ea4c63ea) service to localhost/127.0.0.1:37187 2024-11-11T16:31:54,681 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T16:31:54,682 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data5/current/BP-1543572805-172.17.0.2-1731342705390 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T16:31:54,683 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data6/current/BP-1543572805-172.17.0.2-1731342705390 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T16:31:54,683 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T16:31:54,689 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@26b068f7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T16:31:54,690 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5739b847{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T16:31:54,690 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T16:31:54,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c2c5be{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T16:31:54,690 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a91ec1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.log.dir/,STOPPED} 2024-11-11T16:31:54,695 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T16:31:54,695 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T16:31:54,695 WARN [BP-1543572805-172.17.0.2-1731342705390 heartbeating to localhost/127.0.0.1:37187 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T16:31:54,695 WARN [BP-1543572805-172.17.0.2-1731342705390 heartbeating to localhost/127.0.0.1:37187 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1543572805-172.17.0.2-1731342705390 (Datanode Uuid e66bc071-0f56-4cc2-9193-d9231e8ec122) service to localhost/127.0.0.1:37187 2024-11-11T16:31:54,696 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data3/current/BP-1543572805-172.17.0.2-1731342705390 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T16:31:54,696 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data4/current/BP-1543572805-172.17.0.2-1731342705390 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T16:31:54,697 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T16:31:54,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e705dc8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T16:31:54,707 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2ad1569e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T16:31:54,707 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T16:31:54,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@17f1c7fc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T16:31:54,707 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@32fec40a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.log.dir/,STOPPED} 2024-11-11T16:31:54,709 WARN [BP-1543572805-172.17.0.2-1731342705390 heartbeating to localhost/127.0.0.1:37187 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T16:31:54,709 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-11T16:31:54,709 WARN [BP-1543572805-172.17.0.2-1731342705390 heartbeating to localhost/127.0.0.1:37187 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1543572805-172.17.0.2-1731342705390 (Datanode Uuid 82b774a6-c434-4b51-a970-9849cab83a86) service to localhost/127.0.0.1:37187 2024-11-11T16:31:54,709 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T16:31:54,710 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data1/current/BP-1543572805-172.17.0.2-1731342705390 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T16:31:54,710 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/cluster_dfae65db-0830-0146-ad87-92add71f3f3b/data/data2/current/BP-1543572805-172.17.0.2-1731342705390 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T16:31:54,710 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T16:31:54,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@76e4c45c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T16:31:54,724 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4637aff6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T16:31:54,724 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T16:31:54,724 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@383d55e4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T16:31:54,724 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21b7d177{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.log.dir/,STOPPED} 2024-11-11T16:31:54,739 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-11T16:31:54,782 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-11T16:31:54,795 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=88 (was 158), OpenFileDescriptor=447 (was 391) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=498 (was 507), ProcessCount=11 (was 11), AvailableMemoryMB=2790 (was 3356) 2024-11-11T16:31:54,803 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=88, OpenFileDescriptor=447, MaxFileDescriptor=1048576, SystemLoadAverage=498, ProcessCount=11, AvailableMemoryMB=2790 2024-11-11T16:31:54,803 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T16:31:54,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.log.dir so I do NOT create it in target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4 2024-11-11T16:31:54,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/a3a5602a-3633-c2f5-59da-ede9e07c7a1e/hadoop.tmp.dir so I do NOT create it in target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4 2024-11-11T16:31:54,804 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47, deleteOnExit=true 2024-11-11T16:31:54,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-11T16:31:54,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/test.cache.data in system properties and HBase conf 2024-11-11T16:31:54,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T16:31:54,804 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/hadoop.log.dir in system properties and HBase conf 2024-11-11T16:31:54,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T16:31:54,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T16:31:54,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-11T16:31:54,805 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-11T16:31:54,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T16:31:54,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T16:31:54,805 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T16:31:54,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T16:31:54,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T16:31:54,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T16:31:54,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T16:31:54,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T16:31:54,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T16:31:54,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/nfs.dump.dir in system properties and HBase conf 2024-11-11T16:31:54,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/java.io.tmpdir in system properties and HBase conf 2024-11-11T16:31:54,806 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T16:31:54,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T16:31:54,807 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T16:31:54,915 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T16:31:54,922 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T16:31:54,939 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T16:31:54,939 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T16:31:54,939 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T16:31:54,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T16:31:54,942 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@758ed3c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/hadoop.log.dir/,AVAILABLE} 2024-11-11T16:31:54,945 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41ab5cc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T16:31:55,117 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@67bdd5ed{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/java.io.tmpdir/jetty-localhost-46243-hadoop-hdfs-3_4_1-tests_jar-_-any-17369704847363890736/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T16:31:55,118 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2d952814{HTTP/1.1, (http/1.1)}{localhost:46243} 2024-11-11T16:31:55,119 INFO [Time-limited test {}] server.Server(415): Started @12304ms 2024-11-11T16:31:55,341 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T16:31:55,352 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T16:31:55,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T16:31:55,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T16:31:55,360 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T16:31:55,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c8d1a40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/hadoop.log.dir/,AVAILABLE} 2024-11-11T16:31:55,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78ab2b00{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T16:31:55,533 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@24e08cba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/java.io.tmpdir/jetty-localhost-42563-hadoop-hdfs-3_4_1-tests_jar-_-any-7834178927633154407/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T16:31:55,534 INFO 
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45b09adf{HTTP/1.1, (http/1.1)}{localhost:42563} 2024-11-11T16:31:55,534 INFO [Time-limited test {}] server.Server(415): Started @12720ms 2024-11-11T16:31:55,536 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T16:31:55,641 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T16:31:55,648 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T16:31:55,657 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T16:31:55,657 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T16:31:55,658 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T16:31:55,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3236f207{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/hadoop.log.dir/,AVAILABLE} 2024-11-11T16:31:55,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@55791d09{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T16:31:55,730 WARN [Thread-526 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data1/current/BP-585791280-172.17.0.2-1731342714841/current, will proceed with Du for space computation calculation, 2024-11-11T16:31:55,729 WARN [Thread-527 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data2/current/BP-585791280-172.17.0.2-1731342714841/current, will proceed with Du for space computation calculation, 2024-11-11T16:31:55,789 WARN [Thread-505 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T16:31:55,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x817677710ec29f12 with lease ID 0xac4fc849dfe2dac6: Processing first storage report for DS-b6eefe2b-92ce-48bd-b1d7-90d9e39f8c15 from datanode DatanodeRegistration(127.0.0.1:45013, datanodeUuid=6b338e59-e471-4779-b2cc-861ef2d6a779, infoPort=44329, infoSecurePort=0, ipcPort=44967, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841) 2024-11-11T16:31:55,799 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x817677710ec29f12 with lease ID 0xac4fc849dfe2dac6: from storage DS-b6eefe2b-92ce-48bd-b1d7-90d9e39f8c15 node DatanodeRegistration(127.0.0.1:45013, datanodeUuid=6b338e59-e471-4779-b2cc-861ef2d6a779, infoPort=44329, infoSecurePort=0, ipcPort=44967, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T16:31:55,799 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x817677710ec29f12 with lease ID 0xac4fc849dfe2dac6: Processing first storage report for DS-0e749744-c390-4b0c-84f6-088069ee3bd1 from datanode DatanodeRegistration(127.0.0.1:45013, datanodeUuid=6b338e59-e471-4779-b2cc-861ef2d6a779, infoPort=44329, infoSecurePort=0, ipcPort=44967, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841) 2024-11-11T16:31:55,799 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x817677710ec29f12 with lease ID 0xac4fc849dfe2dac6: from storage DS-0e749744-c390-4b0c-84f6-088069ee3bd1 node DatanodeRegistration(127.0.0.1:45013, datanodeUuid=6b338e59-e471-4779-b2cc-861ef2d6a779, infoPort=44329, infoSecurePort=0, ipcPort=44967, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T16:31:55,820 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@152462a3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/java.io.tmpdir/jetty-localhost-39445-hadoop-hdfs-3_4_1-tests_jar-_-any-12890271452373102388/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T16:31:55,821 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2e4c23ba{HTTP/1.1, (http/1.1)}{localhost:39445} 2024-11-11T16:31:55,821 INFO [Time-limited test {}] server.Server(415): Started @13006ms 2024-11-11T16:31:55,823 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T16:31:55,953 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T16:31:55,969 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T16:31:55,982 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T16:31:55,982 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T16:31:55,982 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T16:31:55,983 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12d3303{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/hadoop.log.dir/,AVAILABLE} 2024-11-11T16:31:55,984 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b7fa3ef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T16:31:56,021 WARN [Thread-561 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data3/current/BP-585791280-172.17.0.2-1731342714841/current, will proceed with Du for space computation calculation, 2024-11-11T16:31:56,022 WARN [Thread-562 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data4/current/BP-585791280-172.17.0.2-1731342714841/current, will proceed with Du for space computation calculation, 2024-11-11T16:31:56,109 WARN [Thread-541 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T16:31:56,121 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xea9dfacfa07e8b9 with lease ID 0xac4fc849dfe2dac7: Processing first storage report for DS-199f5429-28d5-4140-8729-7492e975b8df from datanode DatanodeRegistration(127.0.0.1:37785, datanodeUuid=064eb470-0b4e-4bd5-b241-3d694051aa5d, infoPort=34285, infoSecurePort=0, ipcPort=39737, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841) 2024-11-11T16:31:56,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea9dfacfa07e8b9 with lease ID 0xac4fc849dfe2dac7: from storage DS-199f5429-28d5-4140-8729-7492e975b8df node DatanodeRegistration(127.0.0.1:37785, datanodeUuid=064eb470-0b4e-4bd5-b241-3d694051aa5d, infoPort=34285, infoSecurePort=0, ipcPort=39737, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T16:31:56,121 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xea9dfacfa07e8b9 with lease ID 0xac4fc849dfe2dac7: Processing first storage report for DS-0c2f81ba-86af-4785-96dd-a0781afbf152 from datanode DatanodeRegistration(127.0.0.1:37785, datanodeUuid=064eb470-0b4e-4bd5-b241-3d694051aa5d, infoPort=34285, infoSecurePort=0, ipcPort=39737, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841) 2024-11-11T16:31:56,121 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xea9dfacfa07e8b9 with lease ID 0xac4fc849dfe2dac7: from storage DS-0c2f81ba-86af-4785-96dd-a0781afbf152 node DatanodeRegistration(127.0.0.1:37785, datanodeUuid=064eb470-0b4e-4bd5-b241-3d694051aa5d, infoPort=34285, infoSecurePort=0, ipcPort=39737, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T16:31:56,175 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f9b588c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/java.io.tmpdir/jetty-localhost-46435-hadoop-hdfs-3_4_1-tests_jar-_-any-18255451288659559306/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T16:31:56,177 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38e5384{HTTP/1.1, (http/1.1)}{localhost:46435} 2024-11-11T16:31:56,177 INFO [Time-limited test {}] server.Server(415): Started @13362ms 2024-11-11T16:31:56,178 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-11T16:31:56,314 WARN [Thread-587 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data5/current/BP-585791280-172.17.0.2-1731342714841/current, will proceed with Du for space computation calculation, 2024-11-11T16:31:56,316 WARN [Thread-588 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data6/current/BP-585791280-172.17.0.2-1731342714841/current, will proceed with Du for space computation calculation, 2024-11-11T16:31:56,381 WARN [Thread-576 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T16:31:56,392 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x992bce891826b5ca with lease ID 0xac4fc849dfe2dac8: Processing first storage report for DS-ebb12263-079f-4658-bd4a-3a6b6e8ef9d5 from datanode DatanodeRegistration(127.0.0.1:38687, datanodeUuid=80c81122-302d-44c9-991f-9e7a335e633a, infoPort=36647, infoSecurePort=0, ipcPort=45333, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841) 2024-11-11T16:31:56,393 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x992bce891826b5ca with lease ID 0xac4fc849dfe2dac8: from storage DS-ebb12263-079f-4658-bd4a-3a6b6e8ef9d5 node DatanodeRegistration(127.0.0.1:38687, datanodeUuid=80c81122-302d-44c9-991f-9e7a335e633a, infoPort=36647, infoSecurePort=0, ipcPort=45333, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T16:31:56,393 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x992bce891826b5ca with lease ID 0xac4fc849dfe2dac8: Processing first storage report for DS-62847a19-397f-45d1-b23d-129487491928 from datanode DatanodeRegistration(127.0.0.1:38687, datanodeUuid=80c81122-302d-44c9-991f-9e7a335e633a, infoPort=36647, infoSecurePort=0, ipcPort=45333, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841) 2024-11-11T16:31:56,393 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x992bce891826b5ca with lease ID 0xac4fc849dfe2dac8: from storage DS-62847a19-397f-45d1-b23d-129487491928 node DatanodeRegistration(127.0.0.1:38687, datanodeUuid=80c81122-302d-44c9-991f-9e7a335e633a, infoPort=36647, infoSecurePort=0, ipcPort=45333, storageInfo=lv=-57;cid=testClusterID;nsid=1805752748;c=1731342714841), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T16:31:56,470 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4 2024-11-11T16:31:56,477 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/zookeeper_0, clientPort=56960, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T16:31:56,481 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56960 2024-11-11T16:31:56,481 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:56,483 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:56,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741825_1001 (size=7) 2024-11-11T16:31:56,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741825_1001 (size=7) 2024-11-11T16:31:56,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741825_1001 (size=7) 2024-11-11T16:31:56,528 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9 with version=8 2024-11-11T16:31:56,528 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37187/user/jenkins/test-data/576d9825-90b3-9857-010e-0d0a682ee571/hbase-staging 2024-11-11T16:31:56,531 INFO [Time-limited test {}] client.ConnectionUtils(128): master/16b413a53992:0 server-side Connection retries=45 2024-11-11T16:31:56,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,531 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T16:31:56,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,531 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T16:31:56,531 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, 
hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-11T16:31:56,531 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T16:31:56,533 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35335 2024-11-11T16:31:56,535 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35335 connecting to ZooKeeper ensemble=127.0.0.1:56960 2024-11-11T16:31:56,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:353350x0, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T16:31:56,543 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35335-0x1002faf58d70000 connected 2024-11-11T16:31:56,561 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:56,563 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:56,565 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:56,565 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9, hbase.cluster.distributed=false 2024-11-11T16:31:56,567 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T16:31:56,567 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35335 2024-11-11T16:31:56,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35335 2024-11-11T16:31:56,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35335 2024-11-11T16:31:56,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35335 2024-11-11T16:31:56,569 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35335 2024-11-11T16:31:56,585 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b413a53992:0 server-side Connection retries=45 2024-11-11T16:31:56,585 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,585 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,585 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T16:31:56,585 INFO [Time-limited test 
{}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,585 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T16:31:56,585 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T16:31:56,585 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T16:31:56,586 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42345 2024-11-11T16:31:56,587 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42345 connecting to ZooKeeper ensemble=127.0.0.1:56960 2024-11-11T16:31:56,588 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:56,590 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:56,595 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:423450x0, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T16:31:56,595 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42345-0x1002faf58d70001 connected 2024-11-11T16:31:56,596 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:56,596 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T16:31:56,597 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T16:31:56,598 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T16:31:56,599 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T16:31:56,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42345 2024-11-11T16:31:56,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42345 2024-11-11T16:31:56,600 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42345 2024-11-11T16:31:56,601 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42345 2024-11-11T16:31:56,601 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42345 2024-11-11T16:31:56,618 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b413a53992:0 server-side Connection retries=45 2024-11-11T16:31:56,618 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,618 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,618 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T16:31:56,618 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,618 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T16:31:56,618 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T16:31:56,619 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T16:31:56,619 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33415 2024-11-11T16:31:56,621 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33415 connecting to ZooKeeper ensemble=127.0.0.1:56960 2024-11-11T16:31:56,622 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:56,624 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:56,629 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:334150x0, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T16:31:56,630 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:56,630 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33415-0x1002faf58d70002 connected 2024-11-11T16:31:56,630 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T16:31:56,631 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T16:31:56,632 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 
2024-11-11T16:31:56,633 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T16:31:56,634 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33415 2024-11-11T16:31:56,634 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33415 2024-11-11T16:31:56,634 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33415 2024-11-11T16:31:56,635 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33415 2024-11-11T16:31:56,635 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33415 2024-11-11T16:31:56,651 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/16b413a53992:0 server-side Connection retries=45 2024-11-11T16:31:56,651 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,651 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,651 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T16:31:56,651 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T16:31:56,651 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T16:31:56,651 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T16:31:56,652 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T16:31:56,653 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38681 2024-11-11T16:31:56,655 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38681 connecting to ZooKeeper ensemble=127.0.0.1:56960 2024-11-11T16:31:56,656 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:56,658 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:56,663 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:386810x0, quorum=127.0.0.1:56960, 
baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T16:31:56,663 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38681-0x1002faf58d70003 connected 2024-11-11T16:31:56,663 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:56,664 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T16:31:56,665 DEBUG [Time-limited test {}] mob.MobFileCache(123): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T16:31:56,665 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T16:31:56,666 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T16:31:56,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38681 2024-11-11T16:31:56,667 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38681 2024-11-11T16:31:56,668 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38681 2024-11-11T16:31:56,669 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38681 2024-11-11T16:31:56,672 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38681 2024-11-11T16:31:56,692 DEBUG [M:0;16b413a53992:35335 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;16b413a53992:35335 2024-11-11T16:31:56,692 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/16b413a53992,35335,1731342716530 2024-11-11T16:31:56,694 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:56,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:56,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:56,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:56,695 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35335-0x1002faf58d70000, 
quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/16b413a53992,35335,1731342716530 2024-11-11T16:31:56,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T16:31:56,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T16:31:56,697 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T16:31:56,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,698 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,699 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T16:31:56,699 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/16b413a53992,35335,1731342716530 from backup master directory 2024-11-11T16:31:56,701 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:56,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:56,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:56,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/16b413a53992,35335,1731342716530 2024-11-11T16:31:56,701 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, 
quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T16:31:56,701 WARN [master/16b413a53992:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T16:31:56,702 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=16b413a53992,35335,1731342716530 2024-11-11T16:31:56,708 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/hbase.id] with ID: 7d8fda76-967c-454f-80b5-cde91e46004f 2024-11-11T16:31:56,708 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/.tmp/hbase.id 2024-11-11T16:31:56,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741826_1002 (size=42) 2024-11-11T16:31:56,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741826_1002 (size=42) 2024-11-11T16:31:56,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741826_1002 (size=42) 2024-11-11T16:31:56,722 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/.tmp/hbase.id]:[hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/hbase.id] 2024-11-11T16:31:56,739 INFO [master/16b413a53992:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T16:31:56,739 INFO [master/16b413a53992:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-11T16:31:56,741 INFO [master/16b413a53992:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-11T16:31:56,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,743 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741827_1003 (size=196) 2024-11-11T16:31:56,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741827_1003 (size=196) 2024-11-11T16:31:56,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741827_1003 (size=196) 2024-11-11T16:31:56,757 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T16:31:56,758 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T16:31:56,758 INFO [master/16b413a53992:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T16:31:56,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is 
added to blk_1073741828_1004 (size=1189) 2024-11-11T16:31:56,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741828_1004 (size=1189) 2024-11-11T16:31:56,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741828_1004 (size=1189) 2024-11-11T16:31:56,776 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store 2024-11-11T16:31:56,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741829_1005 (size=34) 2024-11-11T16:31:56,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741829_1005 (size=34) 2024-11-11T16:31:56,791 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:31:56,791 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T16:31:56,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741829_1005 (size=34) 2024-11-11T16:31:56,791 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:56,791 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
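The MasterRegion(370) and HRegion(7590) records above dump the schema of the local 'master:store' table: four column families (info, proc, rs, state) with the VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE attributes shown inline. As a hedged illustration of what that notation corresponds to, the sketch below builds an equivalent descriptor with the public HBase client API; it is not how MasterRegion constructs its descriptor internally, and only two of the four families are spelled out.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8 * 1024)
        .build();
    // 'proc': 1 version, ROW bloom, no encoding, 64 KB blocks ('rs' and 'state' look the same).
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        // 'rs' and 'state' would be added the same way as 'proc'.
        .build();
  }
}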
2024-11-11T16:31:56,791 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T16:31:56,792 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:56,792 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:56,792 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731342716791Disabling compacts and flushes for region at 1731342716791Disabling writes for close at 1731342716791Writing region close event to WAL at 1731342716792 (+1 ms)Closed at 1731342716792 2024-11-11T16:31:56,793 WARN [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/.initializing 2024-11-11T16:31:56,793 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/WALs/16b413a53992,35335,1731342716530 2024-11-11T16:31:56,798 INFO [master/16b413a53992:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C35335%2C1731342716530, suffix=, logDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/WALs/16b413a53992,35335,1731342716530, archiveDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/oldWALs, maxLogs=10 2024-11-11T16:31:56,799 INFO [master/16b413a53992:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 16b413a53992%2C35335%2C1731342716530.1731342716798 2024-11-11T16:31:56,810 INFO [master/16b413a53992:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/WALs/16b413a53992,35335,1731342716530/16b413a53992%2C35335%2C1731342716530.1731342716798 2024-11-11T16:31:56,815 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44329:44329),(127.0.0.1/127.0.0.1:34285:34285),(127.0.0.1/127.0.0.1:36647:36647)] 2024-11-11T16:31:56,816 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:31:56,817 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:31:56,817 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,817 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T16:31:56,821 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:56,822 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:56,822 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T16:31:56,824 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:56,825 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:31:56,825 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,828 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T16:31:56,828 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:56,829 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:31:56,829 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,831 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T16:31:56,831 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:56,832 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:31:56,832 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,833 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,833 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,835 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,835 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,836 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T16:31:56,838 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T16:31:56,842 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:31:56,842 INFO [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65636190, jitterRate=-0.021944552659988403}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T16:31:56,843 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731342716817Initializing all the Stores at 1731342716819 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342716819Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342716819Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342716819Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342716819Cleaning up temporary data from old regions at 1731342716836 (+17 ms)Region opened successfully at 1731342716843 (+7 ms) 2024-11-11T16:31:56,844 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T16:31:56,849 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65dfb3a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0 2024-11-11T16:31:56,850 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-11T16:31:56,850 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T16:31:56,850 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T16:31:56,851 INFO [master/16b413a53992:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T16:31:56,851 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T16:31:56,852 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-11T16:31:56,852 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T16:31:56,854 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
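The "Opened 1595e783b53d99cd5eef43b6debb2682 ... SteppingSplitPolicy" record above reports desiredMaxFileSize=65636190 with jitterRate=-0.021944552659988403, and the later hbase:meta open in this log reports desiredMaxFileSize=69157576 with jitterRate=0.03052818775177002. Assuming the usual convention that the desired size is the configured split size scaled by (1 + jitterRate), both values are consistent with a configured maximum store file size of 67108864 bytes (64 MB) in this test run:

67108864 * (1 - 0.021944552659988403) ≈ 65636190   (master:store, above)
67108864 * (1 + 0.03052818775177002)  ≈ 69157576   (hbase:meta, later in this log)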
2024-11-11T16:31:56,856 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T16:31:56,857 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-11T16:31:56,858 INFO [master/16b413a53992:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T16:31:56,859 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T16:31:56,860 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-11T16:31:56,861 INFO [master/16b413a53992:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T16:31:56,862 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T16:31:56,863 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-11T16:31:56,864 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T16:31:56,866 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T16:31:56,868 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T16:31:56,869 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T16:31:56,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:56,871 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:56,871 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:56,871 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-11T16:31:56,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:56,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,872 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=16b413a53992,35335,1731342716530, sessionid=0x1002faf58d70000, setting cluster-up flag (Was=false) 2024-11-11T16:31:56,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,876 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,881 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T16:31:56,882 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=16b413a53992,35335,1731342716530 2024-11-11T16:31:56,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,885 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:56,890 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T16:31:56,891 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=16b413a53992,35335,1731342716530 2024-11-11T16:31:56,893 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-11T16:31:56,895 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-11T16:31:56,895 INFO [master/16b413a53992:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-11T16:31:56,896 INFO [master/16b413a53992:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
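The ZKUtil(444) and ZKWatcher(609) records in this stretch are plain ZooKeeper reads and watch notifications: a missing znode such as /hbase/balancer is reported as "not necessarily an error", and the creation of /hbase/running fans out NodeCreated and NodeChildrenChanged events to every watcher. A minimal, self-contained sketch of the same pattern with the stock ZooKeeper client API follows; the connect string and znode path come from the log, while the session timeout and the latch are illustrative choices, not HBase's ZKWatcher implementation.

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class RunningZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);

    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // Every notification carries a type, state and path, as in the ZKWatcher(609) lines.
        System.out.println("Received ZooKeeper Event, type=" + event.getType()
            + ", state=" + event.getState() + ", path=" + event.getPath());
        if (event.getType() == Watcher.Event.EventType.NodeCreated
            && "/hbase/running".equals(event.getPath())) {
          created.countDown();
        }
      }
    };

    // Quorum address taken from the log; the 30s session timeout is an arbitrary choice here.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56960", 30_000, watcher);
    try {
      // exists() with watch=true mirrors "set watcher on znode": a null Stat means the
      // node is absent, which is not necessarily an error, and a watch is left behind.
      Stat stat = zk.exists("/hbase/running", true);
      if (stat == null) {
        created.await(); // wait for the NodeCreated notification
      }
    } finally {
      zk.close();
    }
  }
}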
2024-11-11T16:31:56,896 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 16b413a53992,35335,1731342716530 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T16:31:56,897 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5 2024-11-11T16:31:56,897 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5 2024-11-11T16:31:56,897 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5 2024-11-11T16:31:56,897 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/16b413a53992:0, corePoolSize=5, maxPoolSize=5 2024-11-11T16:31:56,898 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/16b413a53992:0, corePoolSize=10, maxPoolSize=10 2024-11-11T16:31:56,898 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:56,898 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/16b413a53992:0, corePoolSize=2, maxPoolSize=2 2024-11-11T16:31:56,898 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:56,898 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731342746898 2024-11-11T16:31:56,899 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T16:31:56,899 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T16:31:56,899 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T16:31:56,899 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T16:31:56,899 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T16:31:56,899 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T16:31:56,899 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:56,899 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T16:31:56,900 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T16:31:56,900 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T16:31:56,900 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T16:31:56,900 INFO [master/16b413a53992:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T16:31:56,900 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.large.0-1731342716900,5,FailOnTimeoutGroup] 2024-11-11T16:31:56,900 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T16:31:56,900 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.small.0-1731342716900,5,FailOnTimeoutGroup] 2024-11-11T16:31:56,900 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:56,900 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-11T16:31:56,900 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T16:31:56,901 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:56,901 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
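The HMaster(1741) record above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is set to a value greater than 0. The configuration key is named in the log itself; the snippet below only shows where such a setting would be applied programmatically, with an arbitrary example value. In a real deployment the key would normally go into hbase-site.xml instead.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileRefCountConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Feature is off until this is > 0 (per the HMaster line above); 3 is only an example value.
    conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);
    System.out.println(conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
  }
}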
2024-11-11T16:31:56,902 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:56,902 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T16:31:56,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741831_1007 (size=1321) 2024-11-11T16:31:56,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741831_1007 (size=1321) 2024-11-11T16:31:56,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741831_1007 (size=1321) 2024-11-11T16:31:56,917 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-11T16:31:56,918 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', 
IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9 2024-11-11T16:31:56,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741832_1008 (size=32) 2024-11-11T16:31:56,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741832_1008 (size=32) 2024-11-11T16:31:56,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741832_1008 (size=32) 2024-11-11T16:31:56,932 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:31:56,934 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T16:31:56,936 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T16:31:56,936 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:56,936 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:56,937 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T16:31:56,939 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T16:31:56,939 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:56,939 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:56,940 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T16:31:56,941 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T16:31:56,942 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:56,942 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:56,942 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T16:31:56,944 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T16:31:56,945 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:56,945 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:56,946 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T16:31:56,947 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740 2024-11-11T16:31:56,947 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740 2024-11-11T16:31:56,949 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T16:31:56,950 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T16:31:56,950 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T16:31:56,952 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T16:31:56,955 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:31:56,956 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69157576, jitterRate=0.03052818775177002}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T16:31:56,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731342716933Initializing all the Stores at 1731342716934 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342716934Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342716934Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342716934Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342716934Cleaning up temporary data from old regions at 1731342716950 (+16 ms)Region opened successfully at 1731342716957 (+7 ms) 2024-11-11T16:31:56,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T16:31:56,957 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T16:31:56,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T16:31:56,957 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T16:31:56,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T16:31:56,958 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T16:31:56,958 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731342716957Disabling compacts and flushes for region at 1731342716957Disabling writes for close at 1731342716957Writing region close event to WAL at 1731342716958 (+1 ms)Closed at 1731342716958 2024-11-11T16:31:56,960 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T16:31:56,961 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-11T16:31:56,961 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T16:31:56,963 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T16:31:56,965 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T16:31:56,975 INFO [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(746): ClusterId : 7d8fda76-967c-454f-80b5-cde91e46004f 2024-11-11T16:31:56,975 DEBUG [RS:2;16b413a53992:38681 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T16:31:56,975 INFO [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(746): ClusterId : 7d8fda76-967c-454f-80b5-cde91e46004f 2024-11-11T16:31:56,975 DEBUG [RS:1;16b413a53992:33415 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 
2024-11-11T16:31:56,976 INFO [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer(746): ClusterId : 7d8fda76-967c-454f-80b5-cde91e46004f 2024-11-11T16:31:56,976 DEBUG [RS:0;16b413a53992:42345 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T16:31:56,978 DEBUG [RS:1;16b413a53992:33415 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T16:31:56,978 DEBUG [RS:1;16b413a53992:33415 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T16:31:56,978 DEBUG [RS:2;16b413a53992:38681 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T16:31:56,978 DEBUG [RS:2;16b413a53992:38681 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T16:31:56,979 DEBUG [RS:0;16b413a53992:42345 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T16:31:56,979 DEBUG [RS:0;16b413a53992:42345 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T16:31:56,982 DEBUG [RS:1;16b413a53992:33415 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T16:31:56,982 DEBUG [RS:2;16b413a53992:38681 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T16:31:56,982 DEBUG [RS:0;16b413a53992:42345 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T16:31:56,982 DEBUG [RS:1;16b413a53992:33415 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7805cb3d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0 2024-11-11T16:31:56,982 DEBUG [RS:2;16b413a53992:38681 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@614dca71, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0 2024-11-11T16:31:56,982 DEBUG [RS:0;16b413a53992:42345 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35d78c65, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=16b413a53992/172.17.0.2:0 2024-11-11T16:31:57,002 DEBUG [RS:1;16b413a53992:33415 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;16b413a53992:33415 2024-11-11T16:31:57,002 INFO [RS:1;16b413a53992:33415 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T16:31:57,002 INFO [RS:1;16b413a53992:33415 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T16:31:57,002 DEBUG [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-11T16:31:57,003 INFO [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,35335,1731342716530 with port=33415, startcode=1731342716618 2024-11-11T16:31:57,003 DEBUG [RS:1;16b413a53992:33415 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T16:31:57,006 DEBUG [RS:2;16b413a53992:38681 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;16b413a53992:38681 2024-11-11T16:31:57,007 INFO [RS:2;16b413a53992:38681 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T16:31:57,007 DEBUG [RS:0;16b413a53992:42345 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;16b413a53992:42345 2024-11-11T16:31:57,007 INFO [RS:2;16b413a53992:38681 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T16:31:57,007 DEBUG [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T16:31:57,007 INFO [RS:0;16b413a53992:42345 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-11T16:31:57,007 INFO [RS:0;16b413a53992:42345 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-11T16:31:57,007 DEBUG [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-11T16:31:57,008 INFO [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,35335,1731342716530 with port=42345, startcode=1731342716584 2024-11-11T16:31:57,008 DEBUG [RS:0;16b413a53992:42345 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T16:31:57,008 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44433, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T16:31:57,008 INFO [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(2659): reportForDuty to master=16b413a53992,35335,1731342716530 with port=38681, startcode=1731342716651 2024-11-11T16:31:57,009 DEBUG [RS:2;16b413a53992:38681 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T16:31:57,009 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35335 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b413a53992,33415,1731342716618 2024-11-11T16:31:57,009 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35335 {}] master.ServerManager(517): Registering regionserver=16b413a53992,33415,1731342716618 2024-11-11T16:31:57,012 DEBUG [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9 2024-11-11T16:31:57,012 DEBUG [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37329 2024-11-11T16:31:57,012 DEBUG [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T16:31:57,014 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 
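The "Config from master" lines above show the three properties the master pushes back to a regionserver at registration time: hbase.rootdir, fs.defaultFS, and hbase.master.info.port (-1 here, meaning the master web UI is disabled). A minimal sketch of reading the same keys from a client-side configuration, with the usual defaults assumed for illustration:

    // Sketch, not from the test harness itself.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HConstants;

    public class ConfigFromMasterSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        String rootDir   = conf.get(HConstants.HBASE_DIR);              // "hbase.rootdir"
        String defaultFs = conf.get("fs.defaultFS");                    // HDFS namenode URI
        int infoPort     = conf.getInt("hbase.master.info.port", 16010); // -1 disables the UI
        System.out.printf("rootdir=%s defaultFS=%s master.info.port=%d%n",
            rootDir, defaultFs, infoPort);
      }
    }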
2024-11-11T16:31:57,015 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40575, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T16:31:57,015 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47907, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T16:31:57,015 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35335 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b413a53992,38681,1731342716651 2024-11-11T16:31:57,016 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35335 {}] master.ServerManager(517): Registering regionserver=16b413a53992,38681,1731342716651 2024-11-11T16:31:57,016 DEBUG [RS:1;16b413a53992:33415 {}] zookeeper.ZKUtil(111): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b413a53992,33415,1731342716618 2024-11-11T16:31:57,016 WARN [RS:1;16b413a53992:33415 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T16:31:57,016 INFO [RS:1;16b413a53992:33415 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T16:31:57,016 DEBUG [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/WALs/16b413a53992,33415,1731342716618 2024-11-11T16:31:57,018 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35335 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 16b413a53992,42345,1731342716584 2024-11-11T16:31:57,018 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35335 {}] master.ServerManager(517): Registering regionserver=16b413a53992,42345,1731342716584 2024-11-11T16:31:57,019 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b413a53992,33415,1731342716618] 2024-11-11T16:31:57,021 DEBUG [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9 2024-11-11T16:31:57,021 DEBUG [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37329 2024-11-11T16:31:57,021 DEBUG [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T16:31:57,024 DEBUG [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9 2024-11-11T16:31:57,024 DEBUG [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37329 2024-11-11T16:31:57,024 DEBUG [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-11T16:31:57,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T16:31:57,026 INFO [RS:1;16b413a53992:33415 {}] 
regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T16:31:57,027 DEBUG [RS:0;16b413a53992:42345 {}] zookeeper.ZKUtil(111): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b413a53992,42345,1731342716584 2024-11-11T16:31:57,027 WARN [RS:0;16b413a53992:42345 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T16:31:57,027 INFO [RS:0;16b413a53992:42345 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T16:31:57,027 DEBUG [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/WALs/16b413a53992,42345,1731342716584 2024-11-11T16:31:57,028 DEBUG [RS:2;16b413a53992:38681 {}] zookeeper.ZKUtil(111): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/16b413a53992,38681,1731342716651 2024-11-11T16:31:57,028 WARN [RS:2;16b413a53992:38681 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T16:31:57,028 INFO [RS:2;16b413a53992:38681 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T16:31:57,029 DEBUG [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/WALs/16b413a53992,38681,1731342716651 2024-11-11T16:31:57,035 INFO [RS:0;16b413a53992:42345 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T16:31:57,037 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b413a53992,42345,1731342716584] 2024-11-11T16:31:57,037 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [16b413a53992,38681,1731342716651] 2024-11-11T16:31:57,044 INFO [RS:1;16b413a53992:33415 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T16:31:57,049 INFO [RS:0;16b413a53992:42345 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T16:31:57,057 INFO [RS:2;16b413a53992:38681 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T16:31:57,064 INFO [RS:1;16b413a53992:33415 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T16:31:57,064 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
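The MemStoreFlusher lines above (globalMemStoreLimit=880 M, lowMark=836 M) follow directly from the usual fractions: the global limit defaults to 0.4 of the heap and the low-water mark to 0.95 of the limit, so 836 M is simply 880 M * 0.95, and 880 M implies a heap of roughly 2.2 GB. A small arithmetic sketch, with the heap size inferred rather than taken from the log:

    // Sketch of the arithmetic behind the logged memstore limits, assuming the
    // default fractions for hbase.regionserver.global.memstore.size (0.4) and
    // hbase.regionserver.global.memstore.size.lower.limit (0.95).
    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        double heapMb = 2200;                 // inferred from the logged 880 M figure
        double globalFraction = 0.4;
        double lowerLimitFraction = 0.95;
        double limitMb = heapMb * globalFraction;        // ~880 M
        double lowMarkMb = limitMb * lowerLimitFraction; // ~836 M
        System.out.printf("globalMemStoreLimit=%.0f M, lowMark=%.0f M%n", limitMb, lowMarkMb);
      }
    }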
2024-11-11T16:31:57,069 INFO [RS:0;16b413a53992:42345 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T16:31:57,069 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,069 INFO [RS:2;16b413a53992:38681 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T16:31:57,070 INFO [RS:2;16b413a53992:38681 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T16:31:57,070 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,071 INFO [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T16:31:57,071 INFO [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T16:31:57,071 INFO [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-11T16:31:57,077 INFO [RS:2;16b413a53992:38681 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T16:31:57,077 INFO [RS:0;16b413a53992:42345 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T16:31:57,077 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,077 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
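The PressureAwareCompactionThroughputController lines above report the default throttling bounds (100 MB/s upper, 50 MB/s lower, retuned every 60 s). A configuration sketch follows; the key names are the ones that controller documents, but treat them as assumptions and verify them against your HBase version before relying on them:

    // Sketch only: values are in bytes per second.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024); // 100 MB/s
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound",   50L * 1024 * 1024); //  50 MB/s
        conf.setInt("hbase.hstore.compaction.throughput.tune.period", 60_000);               // 60 s
      }
    }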
2024-11-11T16:31:57,077 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,077 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0, corePoolSize=2, maxPoolSize=2 2024-11-11T16:31:57,078 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0, corePoolSize=2, maxPoolSize=2 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b413a53992:0, 
corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:31:57,078 DEBUG [RS:2;16b413a53992:38681 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:31:57,078 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,079 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:31:57,079 DEBUG [RS:0;16b413a53992:42345 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:31:57,079 INFO [RS:1;16b413a53992:33415 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-11T16:31:57,079 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,079 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,079 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,079 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,079 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,079 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,079 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T16:31:57,079 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,079 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,079 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,079 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,080 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,080 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,38681,1731342716651-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T16:31:57,080 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,080 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,080 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,080 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,080 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/16b413a53992:0, corePoolSize=2, maxPoolSize=2 2024-11-11T16:31:57,080 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,42345,1731342716584-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-11T16:31:57,080 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,080 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,080 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,080 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,080 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,080 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/16b413a53992:0, corePoolSize=1, maxPoolSize=1 2024-11-11T16:31:57,080 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:31:57,080 DEBUG [RS:1;16b413a53992:33415 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0, corePoolSize=3, maxPoolSize=3 2024-11-11T16:31:57,084 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,085 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,085 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,085 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,085 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,085 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,33415,1731342716618-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T16:31:57,097 INFO [RS:2;16b413a53992:38681 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T16:31:57,097 INFO [RS:0;16b413a53992:42345 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T16:31:57,097 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,38681,1731342716651-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,097 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
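Each "Chore ScheduledChore name=... is enabled" line above corresponds to a ScheduledChore registered with the regionserver's ChoreService. A minimal sketch of that pattern, with invented names and a 60 s period picked to mirror ExecutorStatusChore; this is not code from the test:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      static class LoggingChore extends ScheduledChore {
        LoggingChore(Stoppable stopper) {
          super("LoggingChore", stopper, 60_000); // period in milliseconds
        }
        @Override
        protected void chore() {
          System.out.println("chore tick"); // periodic work goes here
        }
      }

      public static void main(String[] args) {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        ChoreService service = new ChoreService("sketch");
        service.scheduleChore(new LoggingChore(stopper));
        // ... later: service.shutdown();
      }
    }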
2024-11-11T16:31:57,097 INFO [RS:2;16b413a53992:38681 {}] regionserver.Replication(171): 16b413a53992,38681,1731342716651 started 2024-11-11T16:31:57,097 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,42345,1731342716584-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,098 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,098 INFO [RS:0;16b413a53992:42345 {}] regionserver.Replication(171): 16b413a53992,42345,1731342716584 started 2024-11-11T16:31:57,105 INFO [RS:1;16b413a53992:33415 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T16:31:57,105 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,33415,1731342716618-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,105 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,105 INFO [RS:1;16b413a53992:33415 {}] regionserver.Replication(171): 16b413a53992,33415,1731342716618 started 2024-11-11T16:31:57,115 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,115 INFO [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer(1482): Serving as 16b413a53992,42345,1731342716584, RpcServer on 16b413a53992/172.17.0.2:42345, sessionid=0x1002faf58d70001 2024-11-11T16:31:57,115 DEBUG [RS:0;16b413a53992:42345 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T16:31:57,115 DEBUG [RS:0;16b413a53992:42345 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b413a53992,42345,1731342716584 2024-11-11T16:31:57,115 DEBUG [RS:0;16b413a53992:42345 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,42345,1731342716584' 2024-11-11T16:31:57,115 DEBUG [RS:0;16b413a53992:42345 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T16:31:57,116 DEBUG [RS:0;16b413a53992:42345 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T16:31:57,116 WARN [16b413a53992:35335 {}] assignment.AssignmentManager(2443): No servers available; cannot place 1 unassigned regions. 
2024-11-11T16:31:57,119 DEBUG [RS:0;16b413a53992:42345 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T16:31:57,119 DEBUG [RS:0;16b413a53992:42345 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T16:31:57,119 DEBUG [RS:0;16b413a53992:42345 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b413a53992,42345,1731342716584 2024-11-11T16:31:57,119 DEBUG [RS:0;16b413a53992:42345 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,42345,1731342716584' 2024-11-11T16:31:57,119 DEBUG [RS:0;16b413a53992:42345 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T16:31:57,120 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,120 DEBUG [RS:0;16b413a53992:42345 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T16:31:57,120 INFO [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(1482): Serving as 16b413a53992,38681,1731342716651, RpcServer on 16b413a53992/172.17.0.2:38681, sessionid=0x1002faf58d70003 2024-11-11T16:31:57,120 DEBUG [RS:2;16b413a53992:38681 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T16:31:57,121 DEBUG [RS:2;16b413a53992:38681 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b413a53992,38681,1731342716651 2024-11-11T16:31:57,121 DEBUG [RS:2;16b413a53992:38681 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,38681,1731342716651' 2024-11-11T16:31:57,121 DEBUG [RS:2;16b413a53992:38681 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T16:31:57,121 DEBUG [RS:0;16b413a53992:42345 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T16:31:57,121 INFO [RS:0;16b413a53992:42345 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T16:31:57,121 INFO [RS:0;16b413a53992:42345 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T16:31:57,121 DEBUG [RS:2;16b413a53992:38681 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T16:31:57,122 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T16:31:57,122 INFO [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(1482): Serving as 16b413a53992,33415,1731342716618, RpcServer on 16b413a53992/172.17.0.2:33415, sessionid=0x1002faf58d70002 2024-11-11T16:31:57,122 DEBUG [RS:2;16b413a53992:38681 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T16:31:57,122 DEBUG [RS:2;16b413a53992:38681 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T16:31:57,122 DEBUG [RS:1;16b413a53992:33415 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T16:31:57,122 DEBUG [RS:2;16b413a53992:38681 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b413a53992,38681,1731342716651 2024-11-11T16:31:57,122 DEBUG [RS:1;16b413a53992:33415 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 16b413a53992,33415,1731342716618 2024-11-11T16:31:57,122 DEBUG [RS:1;16b413a53992:33415 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,33415,1731342716618' 2024-11-11T16:31:57,122 DEBUG [RS:2;16b413a53992:38681 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,38681,1731342716651' 2024-11-11T16:31:57,122 DEBUG [RS:1;16b413a53992:33415 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T16:31:57,122 DEBUG [RS:2;16b413a53992:38681 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T16:31:57,122 DEBUG [RS:1;16b413a53992:33415 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T16:31:57,123 DEBUG [RS:2;16b413a53992:38681 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T16:31:57,123 DEBUG [RS:2;16b413a53992:38681 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T16:31:57,123 DEBUG [RS:1;16b413a53992:33415 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T16:31:57,123 DEBUG [RS:1;16b413a53992:33415 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T16:31:57,123 INFO [RS:2;16b413a53992:38681 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T16:31:57,123 DEBUG [RS:1;16b413a53992:33415 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 16b413a53992,33415,1731342716618 2024-11-11T16:31:57,123 INFO [RS:2;16b413a53992:38681 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
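The "Quota support disabled" lines above reflect the default of hbase.quota.enabled=false, which keeps both the RPC quota manager and the space quota manager from starting. Enabling them is a single-key change, sketched here against a Configuration as an illustration rather than as the test's own setup:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaEnableSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true); // default is false, hence the log lines
      }
    }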
2024-11-11T16:31:57,123 DEBUG [RS:1;16b413a53992:33415 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '16b413a53992,33415,1731342716618' 2024-11-11T16:31:57,123 DEBUG [RS:1;16b413a53992:33415 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T16:31:57,124 DEBUG [RS:1;16b413a53992:33415 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T16:31:57,124 DEBUG [RS:1;16b413a53992:33415 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T16:31:57,124 INFO [RS:1;16b413a53992:33415 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T16:31:57,124 INFO [RS:1;16b413a53992:33415 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T16:31:57,226 INFO [RS:2;16b413a53992:38681 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C38681%2C1731342716651, suffix=, logDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/WALs/16b413a53992,38681,1731342716651, archiveDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/oldWALs, maxLogs=32 2024-11-11T16:31:57,227 INFO [RS:0;16b413a53992:42345 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C42345%2C1731342716584, suffix=, logDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/WALs/16b413a53992,42345,1731342716584, archiveDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/oldWALs, maxLogs=32 2024-11-11T16:31:57,227 INFO [RS:1;16b413a53992:33415 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C33415%2C1731342716618, suffix=, logDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/WALs/16b413a53992,33415,1731342716618, archiveDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/oldWALs, maxLogs=32 2024-11-11T16:31:57,230 INFO [RS:1;16b413a53992:33415 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 16b413a53992%2C33415%2C1731342716618.1731342717230 2024-11-11T16:31:57,231 INFO [RS:2;16b413a53992:38681 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 16b413a53992%2C38681%2C1731342716651.1731342717230 2024-11-11T16:31:57,231 INFO [RS:0;16b413a53992:42345 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 16b413a53992%2C42345%2C1731342716584.1731342717230 2024-11-11T16:31:57,248 INFO [RS:2;16b413a53992:38681 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/WALs/16b413a53992,38681,1731342716651/16b413a53992%2C38681%2C1731342716651.1731342717230 2024-11-11T16:31:57,254 INFO [RS:1;16b413a53992:33415 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/WALs/16b413a53992,33415,1731342716618/16b413a53992%2C33415%2C1731342716618.1731342717230 2024-11-11T16:31:57,258 INFO [RS:0;16b413a53992:42345 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/WALs/16b413a53992,42345,1731342716584/16b413a53992%2C42345%2C1731342716584.1731342717230 2024-11-11T16:31:57,265 DEBUG [RS:2;16b413a53992:38681 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:36647:36647),(127.0.0.1/127.0.0.1:34285:34285),(127.0.0.1/127.0.0.1:44329:44329)] 2024-11-11T16:31:57,266 DEBUG [RS:1;16b413a53992:33415 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36647:36647),(127.0.0.1/127.0.0.1:34285:34285),(127.0.0.1/127.0.0.1:44329:44329)] 2024-11-11T16:31:57,267 DEBUG [RS:0;16b413a53992:42345 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36647:36647),(127.0.0.1/127.0.0.1:34285:34285),(127.0.0.1/127.0.0.1:44329:44329)] 2024-11-11T16:31:57,367 DEBUG [16b413a53992:35335 {}] assignment.AssignmentManager(2464): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-11T16:31:57,367 DEBUG [16b413a53992:35335 {}] balancer.BalancerClusterState(204): Hosts are {16b413a53992=0} racks are {/default-rack=0} 2024-11-11T16:31:57,369 DEBUG [16b413a53992:35335 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T16:31:57,370 DEBUG [16b413a53992:35335 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T16:31:57,370 DEBUG [16b413a53992:35335 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-11T16:31:57,370 DEBUG [16b413a53992:35335 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T16:31:57,370 DEBUG [16b413a53992:35335 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T16:31:57,370 DEBUG [16b413a53992:35335 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-11T16:31:57,370 INFO [16b413a53992:35335 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T16:31:57,370 INFO [16b413a53992:35335 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T16:31:57,370 INFO [16b413a53992:35335 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-11T16:31:57,370 DEBUG [16b413a53992:35335 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T16:31:57,370 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=16b413a53992,38681,1731342716651 2024-11-11T16:31:57,373 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b413a53992,38681,1731342716651, state=OPENING 2024-11-11T16:31:57,374 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T16:31:57,376 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:57,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:57,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:57,376 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:57,377 
DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T16:31:57,377 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:57,377 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=16b413a53992,38681,1731342716651}] 2024-11-11T16:31:57,377 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:57,378 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:57,378 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:57,532 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T16:31:57,533 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59869, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T16:31:57,538 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-11T16:31:57,539 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T16:31:57,540 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-11T16:31:57,542 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=16b413a53992%2C38681%2C1731342716651.meta, suffix=.meta, logDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/WALs/16b413a53992,38681,1731342716651, archiveDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/oldWALs, maxLogs=32 2024-11-11T16:31:57,544 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 16b413a53992%2C38681%2C1731342716651.meta.1731342717543.meta 2024-11-11T16:31:57,547 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T16:31:57,566 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/WALs/16b413a53992,38681,1731342716651/16b413a53992%2C38681%2C1731342716651.meta.1731342717543.meta 2024-11-11T16:31:57,575 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36647:36647),(127.0.0.1/127.0.0.1:34285:34285),(127.0.0.1/127.0.0.1:44329:44329)] 2024-11-11T16:31:57,581 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:31:57,581 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T16:31:57,581 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T16:31:57,581 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-11T16:31:57,582 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T16:31:57,582 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:31:57,582 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-11T16:31:57,582 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-11T16:31:57,584 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T16:31:57,586 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, 
region 1588230740 columnFamilyName info 2024-11-11T16:31:57,586 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:57,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:57,587 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-11T16:31:57,589 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-11T16:31:57,589 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:57,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:57,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T16:31:57,591 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T16:31:57,591 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:57,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:57,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T16:31:57,593 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T16:31:57,593 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:57,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T16:31:57,594 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-11T16:31:57,595 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740 2024-11-11T16:31:57,597 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740 2024-11-11T16:31:57,599 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-11T16:31:57,599 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-11T16:31:57,600 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
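The FlushLargeStoresPolicy line above describes its fallback: with no hbase.hregion.percolumnfamilyflush.size.lower.bound in the table descriptor, the lower bound becomes the region's memstore flush size divided by the number of column families. For hbase:meta that is four families (info, ns, rep_barrier, table) against the default 128 MB flush size, giving the 32.0 M figure logged here. A worked sketch of that arithmetic:

    // Reproduces the 32.0 M fallback reported above, assuming the default
    // hbase.hregion.memstore.flush.size of 128 MB.
    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        long memstoreFlushSize = 128L * 1024 * 1024; // default flush size in bytes
        int families = 4;                            // info, ns, rep_barrier, table
        long lowerBound = memstoreFlushSize / families;
        System.out.println(lowerBound);              // 33554432 bytes = 32.0 M
      }
    }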
2024-11-11T16:31:57,602 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-11T16:31:57,603 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66996509, jitterRate=-0.0016742199659347534}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T16:31:57,603 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-11T16:31:57,605 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731342717582Writing region info on filesystem at 1731342717582Initializing all the Stores at 1731342717584 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342717584Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342717584Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342717584Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731342717584Cleaning up temporary data from old regions at 1731342717599 (+15 ms)Running coprocessor post-open hooks at 1731342717603 (+4 ms)Region opened successfully at 1731342717605 (+2 ms) 2024-11-11T16:31:57,607 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731342717531 2024-11-11T16:31:57,611 DEBUG [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T16:31:57,611 INFO [RS_OPEN_META-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-11T16:31:57,612 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=16b413a53992,38681,1731342716651 2024-11-11T16:31:57,614 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 16b413a53992,38681,1731342716651, state=OPEN 2024-11-11T16:31:57,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:31:57,616 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:31:57,616 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:57,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:31:57,616 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=16b413a53992,38681,1731342716651 2024-11-11T16:31:57,616 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:57,616 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:57,617 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T16:31:57,617 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T16:31:57,620 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T16:31:57,620 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=16b413a53992,38681,1731342716651 in 239 msec 2024-11-11T16:31:57,621 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T16:31:57,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-11T16:31:57,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T16:31:57,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 659 msec 2024-11-11T16:31:57,625 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-11T16:31:57,626 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-11T16:31:57,627 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T16:31:57,627 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b413a53992,38681,1731342716651, seqNum=-1] 2024-11-11T16:31:57,628 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T16:31:57,630 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58197, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T16:31:57,637 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 742 msec 2024-11-11T16:31:57,638 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731342717637, completionTime=-1 2024-11-11T16:31:57,638 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-11T16:31:57,638 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] assignment.AssignmentManager(1756): Joining cluster... 2024-11-11T16:31:57,641 INFO [master/16b413a53992:0:becomeActiveMaster {}] assignment.AssignmentManager(1768): Number of RegionServers=3 2024-11-11T16:31:57,641 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731342777641 2024-11-11T16:31:57,641 INFO [master/16b413a53992:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731342837641 2024-11-11T16:31:57,641 INFO [master/16b413a53992:0:becomeActiveMaster {}] assignment.AssignmentManager(1775): Joined the cluster in 2 msec 2024-11-11T16:31:57,641 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,35335,1731342716530-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,641 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,35335,1731342716530-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T16:31:57,641 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,35335,1731342716530-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,641 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-16b413a53992:35335, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,642 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,642 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-11T16:31:57,644 DEBUG [master/16b413a53992:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-11T16:31:57,647 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.945sec 2024-11-11T16:31:57,647 INFO [master/16b413a53992:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T16:31:57,647 INFO [master/16b413a53992:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T16:31:57,647 INFO [master/16b413a53992:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T16:31:57,647 INFO [master/16b413a53992:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T16:31:57,648 INFO [master/16b413a53992:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T16:31:57,648 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,35335,1731342716530-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T16:31:57,648 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,35335,1731342716530-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T16:31:57,651 DEBUG [master/16b413a53992:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-11T16:31:57,651 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T16:31:57,651 INFO [master/16b413a53992:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=16b413a53992,35335,1731342716530-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T16:31:57,675 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ba2aca4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T16:31:57,675 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 16b413a53992,35335,-1 for getting cluster id 2024-11-11T16:31:57,675 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-11T16:31:57,677 DEBUG [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7d8fda76-967c-454f-80b5-cde91e46004f' 2024-11-11T16:31:57,677 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-11T16:31:57,678 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7d8fda76-967c-454f-80b5-cde91e46004f" 2024-11-11T16:31:57,678 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9988a82, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T16:31:57,678 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [16b413a53992,35335,-1] 2024-11-11T16:31:57,678 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-11T16:31:57,679 DEBUG [RPCClient-NioEventLoopGroup-6-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:57,680 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46526, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-11T16:31:57,682 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43946203, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T16:31:57,682 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-11T16:31:57,684 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=16b413a53992,38681,1731342716651, seqNum=-1] 2024-11-11T16:31:57,684 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T16:31:57,688 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40424, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T16:31:57,691 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=16b413a53992,35335,1731342716530 2024-11-11T16:31:57,692 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-11T16:31:57,693 DEBUG 
[RPCClient-NioEventLoopGroup-6-6 {}] client.AsyncConnectionImpl(321): The fetched master address is 16b413a53992,35335,1731342716530 2024-11-11T16:31:57,693 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@57379ef7 2024-11-11T16:31:57,693 DEBUG [RPCClient-NioEventLoopGroup-6-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T16:31:57,695 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46542, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T16:31:57,696 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T16:31:57,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-11T16:31:57,701 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T16:31:57,701 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:57,701 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 4 2024-11-11T16:31:57,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:31:57,703 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T16:31:57,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741837_1013 (size=392) 2024-11-11T16:31:57,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741837_1013 (size=392) 2024-11-11T16:31:57,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741837_1013 (size=392) 2024-11-11T16:31:57,718 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 1a4a638bb849e0f900853e52797a8866, NAME => 'TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9 2024-11-11T16:31:57,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741838_1014 (size=51) 2024-11-11T16:31:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741838_1014 (size=51) 2024-11-11T16:31:57,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741838_1014 (size=51) 2024-11-11T16:31:57,733 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:31:57,733 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1722): Closing 1a4a638bb849e0f900853e52797a8866, disabling compactions & flushes 2024-11-11T16:31:57,733 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:57,733 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:57,733 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. after waiting 0 ms 2024-11-11T16:31:57,733 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:57,733 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:57,733 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1676): Region close journal for 1a4a638bb849e0f900853e52797a8866: Waiting for close lock at 1731342717733Disabling compacts and flushes for region at 1731342717733Disabling writes for close at 1731342717733Writing region close event to WAL at 1731342717733Closed at 1731342717733 2024-11-11T16:31:57,735 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T16:31:57,736 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731342717736"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731342717736"}]},"ts":"1731342717736"} 2024-11-11T16:31:57,740 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
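The CreateTableProcedure above (pid=4) is driven by a client-side schema matching the descriptor echoed by the master: table 'TestHBaseWalOnEC' with REGION_REPLICATION => '1' and a single family 'cf' (VERSIONS => '1', BLOOMFILTER => 'NONE', BLOCKSIZE => 65536). Below is a minimal Java sketch of issuing an equivalent create through the HBase Admin API; the class name, connection setup, and variable names are illustrative assumptions and are not taken from the test source.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Mirrors the descriptor logged by HMaster$4(2454) above.
                TableDescriptor td = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestHBaseWalOnEC"))
                    .setRegionReplication(1)
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                        .setMaxVersions(1)                      // VERSIONS => '1'
                        .setBloomFilterType(BloomType.NONE)     // BLOOMFILTER => 'NONE'
                        .setBlocksize(65536)                    // BLOCKSIZE => '65536 B (64KB)'
                        .build())
                    .build();
                admin.createTable(td);  // surfaces on the master as the pid=4 CreateTableProcedure
            }
        }
    }

The subsequent log records (CREATE_TABLE_WRITE_FS_LAYOUT, CREATE_TABLE_ADD_TO_META, ASSIGN) show the server-side states this single call walks through.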
2024-11-11T16:31:57,742 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T16:31:57,742 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731342717742"}]},"ts":"1731342717742"} 2024-11-11T16:31:57,745 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-11T16:31:57,745 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {16b413a53992=0} racks are {/default-rack=0} 2024-11-11T16:31:57,746 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-11T16:31:57,747 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-11T16:31:57,747 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-11T16:31:57,747 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-11T16:31:57,747 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-11T16:31:57,747 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-11T16:31:57,747 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-11T16:31:57,747 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-11T16:31:57,747 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-11T16:31:57,747 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T16:31:57,747 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1a4a638bb849e0f900853e52797a8866, ASSIGN}] 2024-11-11T16:31:57,749 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1a4a638bb849e0f900853e52797a8866, ASSIGN 2024-11-11T16:31:57,751 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1a4a638bb849e0f900853e52797a8866, ASSIGN; state=OFFLINE, location=16b413a53992,33415,1731342716618; forceNewPlan=false, retain=false 2024-11-11T16:31:57,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:31:57,902 INFO [16b413a53992:35335 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-11T16:31:57,902 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1a4a638bb849e0f900853e52797a8866, regionState=OPENING, regionLocation=16b413a53992,33415,1731342716618 2024-11-11T16:31:57,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1a4a638bb849e0f900853e52797a8866, ASSIGN because future has completed 2024-11-11T16:31:57,907 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1a4a638bb849e0f900853e52797a8866, server=16b413a53992,33415,1731342716618}] 2024-11-11T16:31:58,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:31:58,061 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T16:31:58,063 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57967, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T16:31:58,068 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:58,068 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 1a4a638bb849e0f900853e52797a8866, NAME => 'TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866.', STARTKEY => '', ENDKEY => ''} 2024-11-11T16:31:58,069 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,069 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T16:31:58,069 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,069 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,071 INFO [StoreOpener-1a4a638bb849e0f900853e52797a8866-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,073 INFO [StoreOpener-1a4a638bb849e0f900853e52797a8866-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1a4a638bb849e0f900853e52797a8866 columnFamilyName cf 2024-11-11T16:31:58,073 DEBUG [StoreOpener-1a4a638bb849e0f900853e52797a8866-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T16:31:58,073 INFO [StoreOpener-1a4a638bb849e0f900853e52797a8866-1 {}] regionserver.HStore(327): Store=1a4a638bb849e0f900853e52797a8866/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T16:31:58,074 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,075 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/default/TestHBaseWalOnEC/1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,075 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/default/TestHBaseWalOnEC/1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,076 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,076 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,079 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,081 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/default/TestHBaseWalOnEC/1a4a638bb849e0f900853e52797a8866/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T16:31:58,082 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 1a4a638bb849e0f900853e52797a8866; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70138399, jitterRate=0.04514358937740326}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T16:31:58,082 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,084 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 
{event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 1a4a638bb849e0f900853e52797a8866: Running coprocessor pre-open hook at 1731342718069Writing region info on filesystem at 1731342718069Initializing all the Stores at 1731342718070 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731342718071 (+1 ms)Cleaning up temporary data from old regions at 1731342718076 (+5 ms)Running coprocessor post-open hooks at 1731342718082 (+6 ms)Region opened successfully at 1731342718084 (+2 ms) 2024-11-11T16:31:58,086 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866., pid=6, masterSystemTime=1731342718060 2024-11-11T16:31:58,090 DEBUG [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:58,090 INFO [RS_OPEN_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:58,091 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=1a4a638bb849e0f900853e52797a8866, regionState=OPEN, openSeqNum=2, regionLocation=16b413a53992,33415,1731342716618 2024-11-11T16:31:58,095 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-10-1 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1a4a638bb849e0f900853e52797a8866, server=16b413a53992,33415,1731342716618 because future has completed 2024-11-11T16:31:58,102 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T16:31:58,102 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 1a4a638bb849e0f900853e52797a8866, server=16b413a53992,33415,1731342716618 in 190 msec 2024-11-11T16:31:58,108 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T16:31:58,109 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=1a4a638bb849e0f900853e52797a8866, ASSIGN in 355 msec 2024-11-11T16:31:58,110 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T16:31:58,111 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731342718110"}]},"ts":"1731342718110"} 2024-11-11T16:31:58,114 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-11T16:31:58,116 INFO [PEWorker-1 {}] 
procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T16:31:58,120 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestHBaseWalOnEC in 420 msec 2024-11-11T16:31:58,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-11T16:31:58,327 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestHBaseWalOnEC completed 2024-11-11T16:31:58,327 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(3046): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-11T16:31:58,327 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T16:31:58,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3100): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-11T16:31:58,331 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T16:31:58,331 INFO [Time-limited test {}] hbase.HBaseTestingUtil(3120): All regions for table TestHBaseWalOnEC assigned. 2024-11-11T16:31:58,334 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestHBaseWalOnEC', row='row', locateType=CURRENT is [region=TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866., hostname=16b413a53992,33415,1731342716618, seqNum=2] 2024-11-11T16:31:58,335 DEBUG [RPCClient-NioEventLoopGroup-6-7 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T16:31:58,338 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-9-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59000, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T16:31:58,342 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-11T16:31:58,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC 2024-11-11T16:31:58,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T16:31:58,346 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-11T16:31:58,347 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T16:31:58,347 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T16:31:58,456 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T16:31:58,503 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33415 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-11T16:31:58,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:58,504 INFO [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 1a4a638bb849e0f900853e52797a8866 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-11T16:31:58,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/default/TestHBaseWalOnEC/1a4a638bb849e0f900853e52797a8866/.tmp/cf/626d116cf39744e1a333157675cc968f is 36, key is row/cf:cq/1731342718339/Put/seqid=0 2024-11-11T16:31:58,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741839_1015 (size=4787) 2024-11-11T16:31:58,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741839_1015 (size=4787) 2024-11-11T16:31:58,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741839_1015 (size=4787) 2024-11-11T16:31:58,539 INFO [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/default/TestHBaseWalOnEC/1a4a638bb849e0f900853e52797a8866/.tmp/cf/626d116cf39744e1a333157675cc968f 2024-11-11T16:31:58,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/default/TestHBaseWalOnEC/1a4a638bb849e0f900853e52797a8866/.tmp/cf/626d116cf39744e1a333157675cc968f as hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/default/TestHBaseWalOnEC/1a4a638bb849e0f900853e52797a8866/cf/626d116cf39744e1a333157675cc968f 2024-11-11T16:31:58,558 INFO [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/default/TestHBaseWalOnEC/1a4a638bb849e0f900853e52797a8866/cf/626d116cf39744e1a333157675cc968f, entries=1, sequenceid=5, filesize=4.7 K 2024-11-11T16:31:58,560 INFO [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 1a4a638bb849e0f900853e52797a8866 in 56ms, sequenceid=5, compaction requested=false 2024-11-11T16:31:58,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 
{event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 1a4a638bb849e0f900853e52797a8866: 2024-11-11T16:31:58,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:58,560 DEBUG [RS_FLUSH_OPERATIONS-regionserver/16b413a53992:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-11T16:31:58,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-11T16:31:58,567 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-11T16:31:58,568 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 216 msec 2024-11-11T16:31:58,572 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestHBaseWalOnEC in 227 msec 2024-11-11T16:31:58,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35335 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-11T16:31:58,667 INFO [RPCClient-NioEventLoopGroup-6-8 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC completed 2024-11-11T16:31:58,672 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-11T16:31:58,672 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
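The flush sequence above (pid=7 FlushTableProcedure, pid=8 FlushRegionProcedure) persisted a single 32-byte cell whose key is logged by HFileWriterImpl(814) as row/cf:cq. A minimal sketch of a client-side put-then-flush that would produce such a cell is shown below; the class name, the value bytes, and the fresh connection setup are assumptions for illustration, not details taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutAndFlush {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName name = TableName.valueOf("TestHBaseWalOnEC");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin();
                 Table table = conn.getTable(name)) {
                // Row 'row', family 'cf', qualifier 'cq' match the HFile key in the log;
                // the value below is a placeholder.
                Put put = new Put(Bytes.toBytes("row"));
                put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value"));
                table.put(put);     // lands in the region's memstore and WAL
                admin.flush(name);  // drives the FLUSH_TABLE_* states seen above and writes the HFile
            }
        }
    }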
2024-11-11T16:31:58,672 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:31:58,672 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 
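The call stack logged above shows the shutdown entering through TestHBaseWalOnEC.tearDown -> HBaseTestingUtil.shutdownMiniCluster -> shutdownMiniHBaseCluster, which first closes the shared async connection and then stops the cluster. A minimal JUnit 4 sketch of such a tearDown follows; only the shutdownMiniCluster() call appears in the trace, so the field name UTIL, the annotation choice, and the surrounding class are assumptions.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    // Hypothetical excerpt; assumes UTIL started the minicluster in a matching setUp().
    public class ExampleTearDown {
        private static final HBaseTestingUtil UTIL = new HBaseTestingUtil();

        @After
        public void tearDown() throws Exception {
            // Closes the test's cluster connection, then stops the HBase minicluster
            // (and any DFS/ZooKeeper miniclusters the utility started), as traced above.
            UTIL.shutdownMiniCluster();
        }
    }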
2024-11-11T16:31:58,672 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:58,673 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-11T16:31:58,673 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T16:31:58,673 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1217487564, stopped=false 2024-11-11T16:31:58,673 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=16b413a53992,35335,1731342716530 2024-11-11T16:31:58,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:58,675 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:58,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:58,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T16:31:58,675 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:58,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:58,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:58,675 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T16:31:58,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:58,675 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-11T16:31:58,676 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:58,676 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:58,676 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:58,676 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T16:31:58,676 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC.tearDown(TestHBaseWalOnEC.java:101) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at 
org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.runners.ParentRunner.run(ParentRunner.java:413) at org.junit.runners.Suite.runChild(Suite.java:128) at org.junit.runners.Suite.runChild(Suite.java:27) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:31:58,676 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:58,676 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b413a53992,42345,1731342716584' ***** 2024-11-11T16:31:58,676 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T16:31:58,677 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b413a53992,33415,1731342716618' ***** 2024-11-11T16:31:58,677 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T16:31:58,677 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '16b413a53992,38681,1731342716651' ***** 2024-11-11T16:31:58,677 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-11T16:31:58,677 INFO [RS:0;16b413a53992:42345 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T16:31:58,677 INFO [RS:1;16b413a53992:33415 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T16:31:58,677 INFO [RS:2;16b413a53992:38681 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T16:31:58,677 INFO [RS:0;16b413a53992:42345 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T16:31:58,677 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T16:31:58,677 INFO [RS:1;16b413a53992:33415 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T16:31:58,677 INFO [RS:0;16b413a53992:42345 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T16:31:58,677 INFO [RS:1;16b413a53992:33415 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T16:31:58,677 INFO [RS:2;16b413a53992:38681 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
2024-11-11T16:31:58,677 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T16:31:58,677 INFO [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(3091): Received CLOSE for 1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,677 INFO [RS:2;16b413a53992:38681 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T16:31:58,677 INFO [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(959): stopping server 16b413a53992,38681,1731342716651 2024-11-11T16:31:58,677 INFO [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer(959): stopping server 16b413a53992,42345,1731342716584 2024-11-11T16:31:58,677 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-11T16:31:58,677 INFO [RS:2;16b413a53992:38681 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:31:58,678 INFO [RS:0;16b413a53992:42345 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:31:58,678 INFO [RS:2;16b413a53992:38681 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;16b413a53992:38681. 2024-11-11T16:31:58,678 INFO [RS:0;16b413a53992:42345 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;16b413a53992:42345. 2024-11-11T16:31:58,678 DEBUG [RS:2;16b413a53992:38681 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:31:58,678 DEBUG [RS:0;16b413a53992:42345 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at 
org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:31:58,678 INFO [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(959): stopping server 16b413a53992,33415,1731342716618 2024-11-11T16:31:58,678 DEBUG [RS:2;16b413a53992:38681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:58,678 DEBUG [RS:0;16b413a53992:42345 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:58,678 INFO [RS:1;16b413a53992:33415 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:31:58,678 INFO [RS:2;16b413a53992:38681 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T16:31:58,678 INFO [RS:2;16b413a53992:38681 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T16:31:58,678 INFO [RS:1;16b413a53992:33415 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;16b413a53992:33415. 2024-11-11T16:31:58,678 INFO [RS:2;16b413a53992:38681 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T16:31:58,678 INFO [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer(976): stopping server 16b413a53992,42345,1731342716584; all regions closed. 
2024-11-11T16:31:58,678 INFO [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-11T16:31:58,678 DEBUG [RS:1;16b413a53992:33415 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T16:31:58,678 DEBUG [RS:1;16b413a53992:33415 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:58,679 INFO [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T16:31:58,679 DEBUG [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(1325): Online Regions={1a4a638bb849e0f900853e52797a8866=TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866.} 2024-11-11T16:31:58,679 DEBUG [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(1351): Waiting on 1a4a638bb849e0f900853e52797a8866 2024-11-11T16:31:58,679 INFO [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-11T16:31:58,679 DEBUG [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T16:31:58,679 DEBUG [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T16:31:58,679 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-11T16:31:58,679 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1a4a638bb849e0f900853e52797a8866, disabling compactions & flushes 2024-11-11T16:31:58,679 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-11T16:31:58,679 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-11T16:31:58,679 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 
2024-11-11T16:31:58,679 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T16:31:58,679 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T16:31:58,679 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:58,679 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. after waiting 0 ms 2024-11-11T16:31:58,679 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:58,679 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.34 KB heapSize=3.38 KB 2024-11-11T16:31:58,680 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:58,680 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:58,680 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:58,680 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:58,680 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:58,681 INFO [regionserver/16b413a53992:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:58,681 INFO [regionserver/16b413a53992:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:58,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741834_1010 (size=93) 2024-11-11T16:31:58,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741834_1010 (size=93) 2024-11-11T16:31:58,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741834_1010 (size=93) 2024-11-11T16:31:58,686 INFO [regionserver/16b413a53992:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:58,689 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/default/TestHBaseWalOnEC/1a4a638bb849e0f900853e52797a8866/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-11T16:31:58,691 INFO [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 
2024-11-11T16:31:58,691 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1a4a638bb849e0f900853e52797a8866: Waiting for close lock at 1731342718679Running coprocessor pre-close hooks at 1731342718679Disabling compacts and flushes for region at 1731342718679Disabling writes for close at 1731342718679Writing region close event to WAL at 1731342718681 (+2 ms)Running coprocessor post-close hooks at 1731342718690 (+9 ms)Closed at 1731342718691 (+1 ms) 2024-11-11T16:31:58,691 DEBUG [RS_CLOSE_REGION-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866. 2024-11-11T16:31:58,706 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/.tmp/info/abc65ef8f29b4091a50282fb87dd98da is 153, key is TestHBaseWalOnEC,,1731342717696.1a4a638bb849e0f900853e52797a8866./info:regioninfo/1731342718091/Put/seqid=0 2024-11-11T16:31:58,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741840_1016 (size=6637) 2024-11-11T16:31:58,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741840_1016 (size=6637) 2024-11-11T16:31:58,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741840_1016 (size=6637) 2024-11-11T16:31:58,720 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.18 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/.tmp/info/abc65ef8f29b4091a50282fb87dd98da 2024-11-11T16:31:58,755 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/.tmp/ns/8bea83b6e890412dae9406c4b6773847 is 43, key is default/ns:d/1731342717630/Put/seqid=0 2024-11-11T16:31:58,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741841_1017 (size=5153) 2024-11-11T16:31:58,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741841_1017 (size=5153) 2024-11-11T16:31:58,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741841_1017 (size=5153) 2024-11-11T16:31:58,767 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/.tmp/ns/8bea83b6e890412dae9406c4b6773847 2024-11-11T16:31:58,796 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/.tmp/table/9275483d45f14384a9c2f5b52a33955a is 52, key is TestHBaseWalOnEC/table:state/1731342718110/Put/seqid=0 2024-11-11T16:31:58,798 WARN [IPC Server handler 4 on default port 37329 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-11T16:31:58,798 WARN [IPC Server handler 4 on default port 37329 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-11T16:31:58,798 WARN [IPC Server handler 4 on default port 37329 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-11T16:31:58,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741842_1018 (size=5249) 2024-11-11T16:31:58,814 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=96 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/.tmp/table/9275483d45f14384a9c2f5b52a33955a 2024-11-11T16:31:58,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741842_1018 (size=5249) 2024-11-11T16:31:58,835 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/.tmp/info/abc65ef8f29b4091a50282fb87dd98da as hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/info/abc65ef8f29b4091a50282fb87dd98da 2024-11-11T16:31:58,848 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/info/abc65ef8f29b4091a50282fb87dd98da, entries=10, sequenceid=11, filesize=6.5 K 2024-11-11T16:31:58,850 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/.tmp/ns/8bea83b6e890412dae9406c4b6773847 as 
hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/ns/8bea83b6e890412dae9406c4b6773847 2024-11-11T16:31:58,874 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/ns/8bea83b6e890412dae9406c4b6773847, entries=2, sequenceid=11, filesize=5.0 K 2024-11-11T16:31:58,876 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/.tmp/table/9275483d45f14384a9c2f5b52a33955a as hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/table/9275483d45f14384a9c2f5b52a33955a 2024-11-11T16:31:58,879 INFO [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(976): stopping server 16b413a53992,33415,1731342716618; all regions closed. 2024-11-11T16:31:58,881 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:58,884 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:58,884 DEBUG [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-11T16:31:58,885 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:58,887 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:58,887 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:58,890 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/table/9275483d45f14384a9c2f5b52a33955a, entries=2, sequenceid=11, filesize=5.1 K 2024-11-11T16:31:58,892 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 213ms, sequenceid=11, compaction requested=false 2024-11-11T16:31:58,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741835_1011 (size=1298) 2024-11-11T16:31:58,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741835_1011 (size=1298) 2024-11-11T16:31:58,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741835_1011 (size=1298) 2024-11-11T16:31:58,907 DEBUG [RS:1;16b413a53992:33415 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/oldWALs 2024-11-11T16:31:58,907 INFO [RS:1;16b413a53992:33415 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 16b413a53992%2C33415%2C1731342716618:(num 1731342717230) 2024-11-11T16:31:58,908 DEBUG [RS:1;16b413a53992:33415 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:58,908 INFO [RS:1;16b413a53992:33415 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:58,908 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-11T16:31:58,908 INFO [RS:1;16b413a53992:33415 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:31:58,909 INFO [RS:1;16b413a53992:33415 {}] hbase.ChoreService(370): Chore service for: regionserver/16b413a53992:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T16:31:58,909 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T16:31:58,909 INFO [RS:1;16b413a53992:33415 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T16:31:58,909 INFO [RS:1;16b413a53992:33415 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T16:31:58,909 INFO [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-11T16:31:58,909 INFO [RS:1;16b413a53992:33415 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T16:31:58,909 INFO [RS:1;16b413a53992:33415 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:31:58,909 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731342718679Running coprocessor pre-close hooks at 1731342718679Disabling compacts and flushes for region at 1731342718679Disabling writes for close at 1731342718679Obtaining lock to block concurrent updates at 1731342718679Preparing flush snapshotting stores in 1588230740 at 1731342718679Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1377, getHeapSize=3392, getOffHeapSize=0, getCellsCount=14 at 1731342718680 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731342718682 (+2 ms)Flushing 1588230740/info: creating writer at 1731342718682Flushing 1588230740/info: appending metadata at 1731342718706 (+24 ms)Flushing 1588230740/info: closing flushed file at 1731342718706Flushing 1588230740/ns: creating writer at 1731342718736 (+30 ms)Flushing 1588230740/ns: appending metadata at 1731342718755 (+19 ms)Flushing 1588230740/ns: closing flushed file at 1731342718755Flushing 1588230740/table: creating writer at 1731342718775 (+20 ms)Flushing 1588230740/table: appending metadata at 1731342718795 (+20 ms)Flushing 1588230740/table: closing flushed file at 1731342718795Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5950646d: reopening flushed file at 1731342718833 (+38 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@515dbc9c: reopening flushed file at 1731342718848 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@761d9900: reopening flushed file at 1731342718875 (+27 ms)Finished flush of dataSize ~1.34 KB/1377, heapSize ~3.08 KB/3152, currentSize=0 B/0 for 1588230740 in 213ms, sequenceid=11, compaction requested=false at 1731342718892 (+17 ms)Writing region close event to WAL at 1731342718896 (+4 ms)Running coprocessor 
post-close hooks at 1731342718909 (+13 ms)Closed at 1731342718909 2024-11-11T16:31:58,909 INFO [RS:1;16b413a53992:33415 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33415 2024-11-11T16:31:58,909 DEBUG [RS_CLOSE_META-regionserver/16b413a53992:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T16:31:58,911 INFO [regionserver/16b413a53992:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T16:31:58,912 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T16:31:58,920 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b413a53992,33415,1731342716618 2024-11-11T16:31:58,920 INFO [RS:1;16b413a53992:33415 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T16:31:58,920 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b413a53992,33415,1731342716618] 2024-11-11T16:31:58,922 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b413a53992,33415,1731342716618 already deleted, retry=false 2024-11-11T16:31:58,922 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b413a53992,33415,1731342716618 expired; onlineServers=2 2024-11-11T16:31:59,022 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:59,022 INFO [RS:1;16b413a53992:33415 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T16:31:59,022 DEBUG [pool-330-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33415-0x1002faf58d70002, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:59,022 INFO [RS:1;16b413a53992:33415 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b413a53992,33415,1731342716618; zookeeper connection closed. 2024-11-11T16:31:59,023 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@282ac67d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@282ac67d 2024-11-11T16:31:59,080 INFO [regionserver/16b413a53992:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T16:31:59,080 INFO [regionserver/16b413a53992:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T16:31:59,085 INFO [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(976): stopping server 16b413a53992,38681,1731342716651; all regions closed. 
2024-11-11T16:31:59,091 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,092 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,092 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,093 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,094 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,094 DEBUG [RS:0;16b413a53992:42345 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/oldWALs 2024-11-11T16:31:59,094 INFO [RS:0;16b413a53992:42345 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 16b413a53992%2C42345%2C1731342716584:(num 1731342717230) 2024-11-11T16:31:59,094 DEBUG [RS:0;16b413a53992:42345 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:59,094 INFO [RS:0;16b413a53992:42345 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:59,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741836_1012 (size=2751) 2024-11-11T16:31:59,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741836_1012 (size=2751) 2024-11-11T16:31:59,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741836_1012 (size=2751) 2024-11-11T16:31:59,100 INFO [RS:0;16b413a53992:42345 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:31:59,101 INFO [RS:0;16b413a53992:42345 {}] hbase.ChoreService(370): Chore service for: regionserver/16b413a53992:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T16:31:59,101 INFO [RS:0;16b413a53992:42345 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T16:31:59,101 INFO [RS:0;16b413a53992:42345 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T16:31:59,101 INFO [regionserver/16b413a53992:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-11T16:31:59,101 INFO [RS:0;16b413a53992:42345 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-11T16:31:59,101 INFO [RS:0;16b413a53992:42345 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:31:59,101 INFO [RS:0;16b413a53992:42345 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42345 2024-11-11T16:31:59,123 DEBUG [RS:2;16b413a53992:38681 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/oldWALs 2024-11-11T16:31:59,123 INFO [RS:2;16b413a53992:38681 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 16b413a53992%2C38681%2C1731342716651.meta:.meta(num 1731342717543) 2024-11-11T16:31:59,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b413a53992,42345,1731342716584 2024-11-11T16:31:59,125 INFO [RS:0;16b413a53992:42345 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T16:31:59,125 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$368/0x00007f573c8f40f0@52c13852 rejected from java.util.concurrent.ThreadPoolExecutor@17c3ac6e[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-11T16:31:59,129 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T16:31:59,132 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,133 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,133 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,133 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,133 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,137 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b413a53992,42345,1731342716584] 2024-11-11T16:31:59,139 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b413a53992,42345,1731342716584 already deleted, retry=false 2024-11-11T16:31:59,139 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b413a53992,42345,1731342716584 expired; onlineServers=1 2024-11-11T16:31:59,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741833_1009 (size=93) 2024-11-11T16:31:59,141 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741833_1009 (size=93) 2024-11-11T16:31:59,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741833_1009 (size=93) 2024-11-11T16:31:59,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:59,237 INFO [RS:0;16b413a53992:42345 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T16:31:59,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42345-0x1002faf58d70001, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:59,238 INFO [RS:0;16b413a53992:42345 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b413a53992,42345,1731342716584; zookeeper connection closed. 2024-11-11T16:31:59,238 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@20481155 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@20481155 2024-11-11T16:31:59,313 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T16:31:59,313 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-11T16:31:59,545 DEBUG [RS:2;16b413a53992:38681 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/oldWALs 2024-11-11T16:31:59,545 INFO [RS:2;16b413a53992:38681 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 16b413a53992%2C38681%2C1731342716651:(num 1731342717230) 2024-11-11T16:31:59,545 DEBUG [RS:2;16b413a53992:38681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T16:31:59,545 INFO [RS:2;16b413a53992:38681 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T16:31:59,545 INFO [RS:2;16b413a53992:38681 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:31:59,546 INFO [RS:2;16b413a53992:38681 {}] hbase.ChoreService(370): Chore service for: regionserver/16b413a53992:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-11T16:31:59,546 INFO [RS:2;16b413a53992:38681 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:31:59,546 INFO [regionserver/16b413a53992:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T16:31:59,546 INFO [RS:2;16b413a53992:38681 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38681 2024-11-11T16:31:59,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/16b413a53992,38681,1731342716651 2024-11-11T16:31:59,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T16:31:59,548 INFO [RS:2;16b413a53992:38681 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-11T16:31:59,549 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [16b413a53992,38681,1731342716651] 2024-11-11T16:31:59,551 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/16b413a53992,38681,1731342716651 already deleted, retry=false 2024-11-11T16:31:59,551 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 16b413a53992,38681,1731342716651 expired; onlineServers=0 2024-11-11T16:31:59,551 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '16b413a53992,35335,1731342716530' ***** 2024-11-11T16:31:59,551 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T16:31:59,551 INFO [M:0;16b413a53992:35335 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-11T16:31:59,551 INFO [M:0;16b413a53992:35335 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-11T16:31:59,551 DEBUG [M:0;16b413a53992:35335 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T16:31:59,551 DEBUG [M:0;16b413a53992:35335 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T16:31:59,551 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-11T16:31:59,551 DEBUG [master/16b413a53992:0:becomeActiveMaster-HFileCleaner.small.0-1731342716900 {}] cleaner.HFileCleaner(306): Exit Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.small.0-1731342716900,5,FailOnTimeoutGroup] 2024-11-11T16:31:59,552 DEBUG [master/16b413a53992:0:becomeActiveMaster-HFileCleaner.large.0-1731342716900 {}] cleaner.HFileCleaner(306): Exit Thread[master/16b413a53992:0:becomeActiveMaster-HFileCleaner.large.0-1731342716900,5,FailOnTimeoutGroup] 2024-11-11T16:31:59,552 INFO [M:0;16b413a53992:35335 {}] hbase.ChoreService(370): Chore service for: master/16b413a53992:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-11T16:31:59,552 INFO [M:0;16b413a53992:35335 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-11T16:31:59,552 DEBUG [M:0;16b413a53992:35335 {}] master.HMaster(1795): Stopping service threads 2024-11-11T16:31:59,552 INFO [M:0;16b413a53992:35335 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T16:31:59,552 INFO [M:0;16b413a53992:35335 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-11T16:31:59,552 INFO [M:0;16b413a53992:35335 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T16:31:59,552 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T16:31:59,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T16:31:59,553 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T16:31:59,553 DEBUG [M:0;16b413a53992:35335 {}] zookeeper.ZKUtil(347): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T16:31:59,553 WARN [M:0;16b413a53992:35335 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T16:31:59,554 INFO [M:0;16b413a53992:35335 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/.lastflushedseqids 2024-11-11T16:31:59,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741843_1019 (size=127) 2024-11-11T16:31:59,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741843_1019 (size=127) 2024-11-11T16:31:59,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741843_1019 (size=127) 2024-11-11T16:31:59,564 INFO [M:0;16b413a53992:35335 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-11T16:31:59,564 INFO [M:0;16b413a53992:35335 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T16:31:59,564 DEBUG 
[M:0;16b413a53992:35335 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T16:31:59,564 INFO [M:0;16b413a53992:35335 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:59,564 DEBUG [M:0;16b413a53992:35335 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:59,564 DEBUG [M:0;16b413a53992:35335 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T16:31:59,564 DEBUG [M:0;16b413a53992:35335 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:59,565 INFO [M:0;16b413a53992:35335 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=26.83 KB heapSize=34.12 KB 2024-11-11T16:31:59,591 DEBUG [M:0;16b413a53992:35335 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cebaccef50134ea197385a2ca02dc936 is 82, key is hbase:meta,,1/info:regioninfo/1731342717612/Put/seqid=0 2024-11-11T16:31:59,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741844_1020 (size=5672) 2024-11-11T16:31:59,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741844_1020 (size=5672) 2024-11-11T16:31:59,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741844_1020 (size=5672) 2024-11-11T16:31:59,601 INFO [M:0;16b413a53992:35335 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cebaccef50134ea197385a2ca02dc936 2024-11-11T16:31:59,635 DEBUG [M:0;16b413a53992:35335 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0d74fe01a3c3466aa848b2062b13c913 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731342718118/Put/seqid=0 2024-11-11T16:31:59,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741845_1021 (size=6439) 2024-11-11T16:31:59,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741845_1021 (size=6439) 2024-11-11T16:31:59,646 INFO [M:0;16b413a53992:35335 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.14 KB at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0d74fe01a3c3466aa848b2062b13c913 2024-11-11T16:31:59,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741845_1021 (size=6439) 2024-11-11T16:31:59,650 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:59,650 INFO [RS:2;16b413a53992:38681 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-11T16:31:59,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38681-0x1002faf58d70003, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T16:31:59,650 INFO [RS:2;16b413a53992:38681 {}] regionserver.HRegionServer(1031): Exiting; stopping=16b413a53992,38681,1731342716651; zookeeper connection closed. 2024-11-11T16:31:59,651 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@70b6cf9d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@70b6cf9d 2024-11-11T16:31:59,651 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-11T16:31:59,674 DEBUG [M:0;16b413a53992:35335 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a8ba5fdd0dfa4d00929936456b9b1e9b is 69, key is 16b413a53992,33415,1731342716618/rs:state/1731342717010/Put/seqid=0 2024-11-11T16:31:59,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741846_1022 (size=5294) 2024-11-11T16:31:59,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741846_1022 (size=5294) 2024-11-11T16:31:59,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741846_1022 (size=5294) 2024-11-11T16:31:59,684 INFO [M:0;16b413a53992:35335 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=72 (bloomFilter=true), to=hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a8ba5fdd0dfa4d00929936456b9b1e9b 2024-11-11T16:31:59,691 DEBUG [M:0;16b413a53992:35335 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cebaccef50134ea197385a2ca02dc936 as hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cebaccef50134ea197385a2ca02dc936 2024-11-11T16:31:59,702 INFO [M:0;16b413a53992:35335 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cebaccef50134ea197385a2ca02dc936, entries=8, sequenceid=72, filesize=5.5 K 2024-11-11T16:31:59,703 DEBUG [M:0;16b413a53992:35335 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0d74fe01a3c3466aa848b2062b13c913 as 
hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0d74fe01a3c3466aa848b2062b13c913 2024-11-11T16:31:59,709 INFO [M:0;16b413a53992:35335 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0d74fe01a3c3466aa848b2062b13c913, entries=8, sequenceid=72, filesize=6.3 K 2024-11-11T16:31:59,711 DEBUG [M:0;16b413a53992:35335 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/a8ba5fdd0dfa4d00929936456b9b1e9b as hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a8ba5fdd0dfa4d00929936456b9b1e9b 2024-11-11T16:31:59,718 INFO [M:0;16b413a53992:35335 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37329/user/jenkins/test-data/b1ce22fd-a1f6-22a2-5989-51d670644ac9/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/a8ba5fdd0dfa4d00929936456b9b1e9b, entries=3, sequenceid=72, filesize=5.2 K 2024-11-11T16:31:59,720 INFO [M:0;16b413a53992:35335 {}] regionserver.HRegion(3140): Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=72, compaction requested=false 2024-11-11T16:31:59,721 INFO [M:0;16b413a53992:35335 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T16:31:59,721 DEBUG [M:0;16b413a53992:35335 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731342719564Disabling compacts and flushes for region at 1731342719564Disabling writes for close at 1731342719564Obtaining lock to block concurrent updates at 1731342719565 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731342719565Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=27471, getHeapSize=34872, getOffHeapSize=0, getCellsCount=85 at 1731342719565Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731342719566 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731342719567 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731342719591 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731342719591Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731342719609 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731342719635 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731342719635Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731342719655 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731342719674 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731342719674Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@475a8e0d: reopening flushed file at 1731342719690 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@634b7238: reopening flushed file at 1731342719702 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b649671: reopening flushed file at 1731342719710 (+8 ms)Finished flush of dataSize ~26.83 KB/27471, heapSize ~33.82 KB/34632, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=72, compaction requested=false at 1731342719720 (+10 ms)Writing region close event to WAL at 1731342719721 (+1 ms)Closed at 1731342719721 2024-11-11T16:31:59,722 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,722 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,722 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,722 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,722 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-11T16:31:59,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38687 is added to blk_1073741830_1006 (size=32674) 2024-11-11T16:31:59,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37785 is added to blk_1073741830_1006 (size=32674) 2024-11-11T16:31:59,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45013 is added to blk_1073741830_1006 (size=32674) 2024-11-11T16:31:59,726 INFO [M:0;16b413a53992:35335 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-11T16:31:59,726 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-11T16:31:59,727 INFO [M:0;16b413a53992:35335 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35335
2024-11-11T16:31:59,727 INFO [M:0;16b413a53992:35335 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-11T16:31:59,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T16:31:59,832 INFO [M:0;16b413a53992:35335 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-11T16:31:59,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35335-0x1002faf58d70000, quorum=127.0.0.1:56960, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T16:31:59,835 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f9b588c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T16:31:59,835 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38e5384{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T16:31:59,835 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T16:31:59,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b7fa3ef{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T16:31:59,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12d3303{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/hadoop.log.dir/,STOPPED}
2024-11-11T16:31:59,837 WARN [BP-585791280-172.17.0.2-1731342714841 heartbeating to localhost/127.0.0.1:37329 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T16:31:59,837 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T16:31:59,837 WARN [BP-585791280-172.17.0.2-1731342714841 heartbeating to localhost/127.0.0.1:37329 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-585791280-172.17.0.2-1731342714841 (Datanode Uuid 80c81122-302d-44c9-991f-9e7a335e633a) service to localhost/127.0.0.1:37329
2024-11-11T16:31:59,837 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T16:31:59,838 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data5/current/BP-585791280-172.17.0.2-1731342714841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T16:31:59,838 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data6/current/BP-585791280-172.17.0.2-1731342714841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T16:31:59,839 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T16:31:59,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@152462a3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T16:31:59,841 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2e4c23ba{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T16:31:59,841 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T16:31:59,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@55791d09{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T16:31:59,841 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3236f207{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/hadoop.log.dir/,STOPPED}
2024-11-11T16:31:59,843 WARN [BP-585791280-172.17.0.2-1731342714841 heartbeating to localhost/127.0.0.1:37329 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T16:31:59,843 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T16:31:59,843 WARN [BP-585791280-172.17.0.2-1731342714841 heartbeating to localhost/127.0.0.1:37329 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-585791280-172.17.0.2-1731342714841 (Datanode Uuid 064eb470-0b4e-4bd5-b241-3d694051aa5d) service to localhost/127.0.0.1:37329
2024-11-11T16:31:59,843 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T16:31:59,844 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data3/current/BP-585791280-172.17.0.2-1731342714841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T16:31:59,844 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data4/current/BP-585791280-172.17.0.2-1731342714841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T16:31:59,844 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T16:31:59,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@24e08cba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T16:31:59,847 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45b09adf{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T16:31:59,847 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T16:31:59,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78ab2b00{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T16:31:59,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c8d1a40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/hadoop.log.dir/,STOPPED}
2024-11-11T16:31:59,849 WARN [BP-585791280-172.17.0.2-1731342714841 heartbeating to localhost/127.0.0.1:37329 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T16:31:59,849 WARN [BP-585791280-172.17.0.2-1731342714841 heartbeating to localhost/127.0.0.1:37329 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-585791280-172.17.0.2-1731342714841 (Datanode Uuid 6b338e59-e471-4779-b2cc-861ef2d6a779) service to localhost/127.0.0.1:37329
2024-11-11T16:31:59,849 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T16:31:59,849 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T16:31:59,849 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data1/current/BP-585791280-172.17.0.2-1731342714841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T16:31:59,850 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/cluster_70deeee1-cb28-7b18-7d4d-c7138a4f9c47/data/data2/current/BP-585791280-172.17.0.2-1731342714841 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T16:31:59,850 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T16:31:59,859 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@67bdd5ed{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T16:31:59,860 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2d952814{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T16:31:59,860 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T16:31:59,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41ab5cc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T16:31:59,860 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@758ed3c0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3/hbase-server/target/test-data/9556502e-bd06-3d33-7794-2d9027f9f2b4/hadoop.log.dir/,STOPPED}
2024-11-11T16:31:59,870 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-11T16:31:59,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-11T16:31:59,911 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=148 (was 88) - Thread LEAK? -, OpenFileDescriptor=517 (was 447) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=546 (was 498) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3345 (was 2790) - AvailableMemoryMB LEAK? -