2024-11-11 04:03:40,702 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@8462f31 2024-11-11 04:03:40,739 main DEBUG Took 0.033449 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-11 04:03:40,743 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-11 04:03:40,744 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-11 04:03:40,746 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-11 04:03:40,748 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,775 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-11 04:03:40,799 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,802 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,803 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,803 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,804 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,804 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,806 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,806 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,807 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,808 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,809 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,809 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,810 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,811 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-11 04:03:40,811 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,843 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,845 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,845 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,846 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,846 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,847 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,847 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,848 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,848 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 04:03:40,849 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,849 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-11 04:03:40,851 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 04:03:40,862 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-11 04:03:40,866 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-11 04:03:40,869 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-11 04:03:40,871 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-11 04:03:40,872 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-11 04:03:40,890 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-11 04:03:40,901 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-11 04:03:40,904 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-11 04:03:40,905 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-11 04:03:40,906 main DEBUG createAppenders(={Console}) 2024-11-11 04:03:40,907 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@8462f31 initialized 2024-11-11 04:03:40,907 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@8462f31 2024-11-11 04:03:40,908 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@8462f31 OK. 2024-11-11 04:03:40,908 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-11 04:03:40,910 main DEBUG OutputStream closed 2024-11-11 04:03:40,910 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-11 04:03:40,911 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-11 04:03:40,911 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@73700b80 OK 2024-11-11 04:03:41,034 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-11 04:03:41,037 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-11 04:03:41,039 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-11 04:03:41,040 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-11 04:03:41,041 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-11 04:03:41,042 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-11 04:03:41,043 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-11 04:03:41,043 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-11 04:03:41,044 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-11 04:03:41,044 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-11 04:03:41,045 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-11 04:03:41,045 main DEBUG Registering MBean 
org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-11 04:03:41,046 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-11 04:03:41,047 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-11 04:03:41,047 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-11 04:03:41,048 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-11 04:03:41,048 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-11 04:03:41,050 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-11 04:03:41,052 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-11 04:03:41,053 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7b420819) with optional ClassLoader: null 2024-11-11 04:03:41,053 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-11 04:03:41,055 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7b420819] started OK. 2024-11-11T04:03:41,081 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestHBaseWalOnEC timeout: 26 mins 2024-11-11 04:03:41,086 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-11 04:03:41,086 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-11T04:03:41,657 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257 2024-11-11T04:03:41,695 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1, deleteOnExit=true 2024-11-11T04:03:41,697 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/test.cache.data in system properties and HBase conf 2024-11-11T04:03:41,702 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T04:03:41,704 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.log.dir in system properties and HBase conf 2024-11-11T04:03:41,705 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T04:03:41,706 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T04:03:41,706 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-11T04:03:41,838 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-11T04:03:41,957 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T04:03:41,963 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:03:41,964 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T04:03:41,965 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T04:03:41,965 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:03:41,966 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T04:03:41,967 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T04:03:41,967 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T04:03:41,968 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:03:41,969 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T04:03:41,969 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/nfs.dump.dir in system properties and HBase conf 2024-11-11T04:03:41,970 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/java.io.tmpdir in system properties and HBase conf 2024-11-11T04:03:41,970 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T04:03:41,971 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T04:03:41,972 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T04:03:43,175 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-11T04:03:43,289 INFO [Time-limited test {}] log.Log(170): Logging initialized @3910ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-11T04:03:43,381 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:03:43,470 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:03:43,504 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:03:43,504 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:03:43,507 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:03:43,527 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:03:43,531 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c3a779d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:03:43,532 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a436cce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:03:43,762 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3f706372{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/java.io.tmpdir/jetty-localhost-43319-hadoop-hdfs-3_4_1-tests_jar-_-any-1736289633847880315/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T04:03:43,773 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2c67d4dc{HTTP/1.1, (http/1.1)}{localhost:43319} 2024-11-11T04:03:43,774 INFO [Time-limited test {}] server.Server(415): Started @4396ms 2024-11-11T04:03:44,340 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:03:44,348 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:03:44,350 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:03:44,350 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:03:44,350 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:03:44,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@657ab07f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:03:44,352 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@72bec311{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:03:44,486 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@61a09aaa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/java.io.tmpdir/jetty-localhost-38685-hadoop-hdfs-3_4_1-tests_jar-_-any-9879157504153874231/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:03:44,487 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@487ce309{HTTP/1.1, (http/1.1)}{localhost:38685} 2024-11-11T04:03:44,487 INFO [Time-limited test {}] server.Server(415): Started @5109ms 2024-11-11T04:03:44,557 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:03:44,689 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:03:44,695 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:03:44,699 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:03:44,699 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:03:44,699 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T04:03:44,703 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3c2481f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:03:44,703 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@31a55d25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:03:44,842 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3af8c6aa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/java.io.tmpdir/jetty-localhost-37363-hadoop-hdfs-3_4_1-tests_jar-_-any-8479320270556734551/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:03:44,842 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5a19409c{HTTP/1.1, (http/1.1)}{localhost:37363} 2024-11-11T04:03:44,843 INFO [Time-limited test {}] server.Server(415): Started @5465ms 2024-11-11T04:03:44,845 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:03:44,905 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:03:44,910 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:03:44,916 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:03:44,916 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:03:44,916 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:03:44,917 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66733fff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:03:44,918 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6c51149d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:03:45,034 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@216c38ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/java.io.tmpdir/jetty-localhost-36785-hadoop-hdfs-3_4_1-tests_jar-_-any-367734952736180426/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:03:45,034 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@27b3056c{HTTP/1.1, (http/1.1)}{localhost:36785} 2024-11-11T04:03:45,035 INFO [Time-limited test {}] server.Server(415): Started @5657ms 2024-11-11T04:03:45,037 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-11T04:03:45,789 WARN [Thread-123 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data4/current/BP-234455755-172.17.0.2-1731297822701/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:45,790 WARN [Thread-122 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data3/current/BP-234455755-172.17.0.2-1731297822701/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:45,795 WARN [Thread-124 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data1/current/BP-234455755-172.17.0.2-1731297822701/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:45,796 WARN [Thread-125 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data2/current/BP-234455755-172.17.0.2-1731297822701/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:45,837 WARN [Thread-133 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data5/current/BP-234455755-172.17.0.2-1731297822701/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:45,847 WARN [Thread-135 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data6/current/BP-234455755-172.17.0.2-1731297822701/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:45,869 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T04:03:45,871 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T04:03:45,922 WARN [Thread-103 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:03:45,946 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7af5d2a14deef20a with lease ID 0xfd0baa831a9342e5: Processing first storage report for DS-3bde0d25-7273-444a-9c25-0f16c6b7e7e7 from datanode DatanodeRegistration(127.0.0.1:42877, datanodeUuid=858a6258-bf87-44de-82fa-608282b3f8e9, infoPort=34669, infoSecurePort=0, ipcPort=35111, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701) 2024-11-11T04:03:45,947 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7af5d2a14deef20a with lease ID 0xfd0baa831a9342e5: from storage DS-3bde0d25-7273-444a-9c25-0f16c6b7e7e7 node DatanodeRegistration(127.0.0.1:42877, datanodeUuid=858a6258-bf87-44de-82fa-608282b3f8e9, infoPort=34669, infoSecurePort=0, ipcPort=35111, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T04:03:45,948 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2253eaf3a0ee6f3b with lease ID 0xfd0baa831a9342e6: Processing first storage report for DS-cb67300d-81ce-4568-9d8c-a531498b481c from datanode DatanodeRegistration(127.0.0.1:33265, datanodeUuid=5f8d56fd-3e46-4296-966b-1fa4aaed217f, infoPort=34679, infoSecurePort=0, ipcPort=35857, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701) 2024-11-11T04:03:45,948 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2253eaf3a0ee6f3b with lease ID 0xfd0baa831a9342e6: from storage DS-cb67300d-81ce-4568-9d8c-a531498b481c node DatanodeRegistration(127.0.0.1:33265, datanodeUuid=5f8d56fd-3e46-4296-966b-1fa4aaed217f, infoPort=34679, infoSecurePort=0, ipcPort=35857, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:03:45,948 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1bae83ee21ee9e7c with lease ID 0xfd0baa831a9342e4: Processing first storage report for DS-52edfe8f-1bf1-4dd2-9fc4-69e1211cdc38 from datanode DatanodeRegistration(127.0.0.1:37915, datanodeUuid=684fad58-a049-4a16-9d3a-17748bc29807, infoPort=34377, infoSecurePort=0, ipcPort=43867, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701) 2024-11-11T04:03:45,949 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1bae83ee21ee9e7c with lease ID 0xfd0baa831a9342e4: from storage DS-52edfe8f-1bf1-4dd2-9fc4-69e1211cdc38 node DatanodeRegistration(127.0.0.1:37915, datanodeUuid=684fad58-a049-4a16-9d3a-17748bc29807, infoPort=34377, infoSecurePort=0, ipcPort=43867, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:03:45,949 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7af5d2a14deef20a with lease ID 0xfd0baa831a9342e5: Processing first storage report for DS-231390a1-94bb-44d4-88c7-e73901032c71 from datanode DatanodeRegistration(127.0.0.1:42877, datanodeUuid=858a6258-bf87-44de-82fa-608282b3f8e9, infoPort=34669, infoSecurePort=0, ipcPort=35111, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701) 2024-11-11T04:03:45,949 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x7af5d2a14deef20a with lease ID 0xfd0baa831a9342e5: from storage DS-231390a1-94bb-44d4-88c7-e73901032c71 node DatanodeRegistration(127.0.0.1:42877, datanodeUuid=858a6258-bf87-44de-82fa-608282b3f8e9, infoPort=34669, infoSecurePort=0, ipcPort=35111, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:03:45,949 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2253eaf3a0ee6f3b with lease ID 0xfd0baa831a9342e6: Processing first storage report for DS-aa9ff271-b9b9-43f8-9260-b143496ebb77 from datanode DatanodeRegistration(127.0.0.1:33265, datanodeUuid=5f8d56fd-3e46-4296-966b-1fa4aaed217f, infoPort=34679, infoSecurePort=0, ipcPort=35857, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701) 2024-11-11T04:03:45,950 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2253eaf3a0ee6f3b with lease ID 0xfd0baa831a9342e6: from storage DS-aa9ff271-b9b9-43f8-9260-b143496ebb77 node DatanodeRegistration(127.0.0.1:33265, datanodeUuid=5f8d56fd-3e46-4296-966b-1fa4aaed217f, infoPort=34679, infoSecurePort=0, ipcPort=35857, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:03:45,950 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1bae83ee21ee9e7c with lease ID 0xfd0baa831a9342e4: Processing first storage report for DS-2fb48f8d-542d-4b2b-954f-93947c296ca2 from datanode DatanodeRegistration(127.0.0.1:37915, datanodeUuid=684fad58-a049-4a16-9d3a-17748bc29807, infoPort=34377, infoSecurePort=0, ipcPort=43867, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701) 2024-11-11T04:03:45,950 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1bae83ee21ee9e7c with lease ID 0xfd0baa831a9342e4: from storage DS-2fb48f8d-542d-4b2b-954f-93947c296ca2 node DatanodeRegistration(127.0.0.1:37915, datanodeUuid=684fad58-a049-4a16-9d3a-17748bc29807, infoPort=34377, infoSecurePort=0, ipcPort=43867, storageInfo=lv=-57;cid=testClusterID;nsid=843089636;c=1731297822701), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:03:45,992 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257 2024-11-11T04:03:46,105 WARN [Time-limited test {}] erasurecode.ErasureCodeNative(55): ISA-L support is not available in your platform... 
using builtin-java codec where applicable 2024-11-11T04:03:46,192 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=155, OpenFileDescriptor=390, MaxFileDescriptor=1048576, SystemLoadAverage=479, ProcessCount=11, AvailableMemoryMB=5685 2024-11-11T04:03:46,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T04:03:46,195 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1143): NOT STARTING DFS 2024-11-11T04:03:46,329 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/zookeeper_0, clientPort=57522, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T04:03:46,365 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=57522 2024-11-11T04:03:46,380 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:46,384 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:46,519 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:46,520 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:46,576 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:33198 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775792_1001] {}] datanode.DataXceiver(331): 127.0.0.1:33265:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33198 dst: /127.0.0.1:33265 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:03:46,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775792_1002 (size=7) 2024-11-11T04:03:47,001 WARN [Time-limited test {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T04:03:47,013 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6 with version=8 2024-11-11T04:03:47,013 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/hbase-staging 2024-11-11T04:03:47,177 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-11T04:03:47,423 INFO [Time-limited test {}] client.ConnectionUtils(129): master/388b5ced38b8:0 server-side Connection retries=45 2024-11-11T04:03:47,442 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:47,443 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:47,443 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:03:47,443 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:47,443 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:03:47,587 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:03:47,641 INFO [Time-limited test {}] 
metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-11T04:03:47,650 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-11T04:03:47,653 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:03:47,676 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 1475 (auto-detected) 2024-11-11T04:03:47,677 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-11T04:03:47,695 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44323 2024-11-11T04:03:47,703 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:47,706 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:47,718 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:44323 connecting to ZooKeeper ensemble=127.0.0.1:57522 2024-11-11T04:03:47,850 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:443230x0, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:03:47,853 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44323-0x1012959f8750000 connected 2024-11-11T04:03:47,934 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:03:47,938 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:47,942 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:03:47,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44323 2024-11-11T04:03:47,946 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44323 2024-11-11T04:03:47,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44323 2024-11-11T04:03:47,955 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44323 2024-11-11T04:03:47,955 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44323 2024-11-11T04:03:47,964 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6, hbase.cluster.distributed=false 2024-11-11T04:03:48,047 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/388b5ced38b8:0 server-side Connection retries=45 
2024-11-11T04:03:48,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:48,047 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:48,048 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:03:48,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:48,048 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:03:48,050 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:03:48,053 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:03:48,056 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:43881 2024-11-11T04:03:48,058 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:03:48,063 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:03:48,065 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:48,071 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:48,078 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:43881 connecting to ZooKeeper ensemble=127.0.0.1:57522 2024-11-11T04:03:48,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:438810x0, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:03:48,083 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:438810x0, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:03:48,083 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43881-0x1012959f8750001 connected 2024-11-11T04:03:48,085 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:48,086 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:03:48,091 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43881 2024-11-11T04:03:48,091 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43881 2024-11-11T04:03:48,092 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43881 2024-11-11T04:03:48,093 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43881 2024-11-11T04:03:48,094 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43881 2024-11-11T04:03:48,115 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/388b5ced38b8:0 server-side Connection retries=45 2024-11-11T04:03:48,115 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:48,116 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:48,116 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:03:48,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:48,117 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:03:48,117 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:03:48,118 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:03:48,119 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:44251 2024-11-11T04:03:48,120 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:03:48,123 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:03:48,125 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:48,131 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:48,138 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44251 connecting to ZooKeeper ensemble=127.0.0.1:57522 2024-11-11T04:03:48,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:442510x0, 
quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:03:48,150 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:442510x0, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:03:48,150 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44251-0x1012959f8750002 connected 2024-11-11T04:03:48,151 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:48,152 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:03:48,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44251 2024-11-11T04:03:48,153 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44251 2024-11-11T04:03:48,154 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44251 2024-11-11T04:03:48,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44251 2024-11-11T04:03:48,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44251 2024-11-11T04:03:48,174 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/388b5ced38b8:0 server-side Connection retries=45 2024-11-11T04:03:48,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:48,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:48,175 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:03:48,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:48,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:03:48,175 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:03:48,176 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:03:48,176 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33817 2024-11-11T04:03:48,177 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 
MB, blockSize=64 KB 2024-11-11T04:03:48,178 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:03:48,179 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:48,182 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:48,186 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33817 connecting to ZooKeeper ensemble=127.0.0.1:57522 2024-11-11T04:03:48,190 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338170x0, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:03:48,191 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:338170x0, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:03:48,191 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33817-0x1012959f8750003 connected 2024-11-11T04:03:48,192 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:48,193 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:03:48,194 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33817 2024-11-11T04:03:48,194 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33817 2024-11-11T04:03:48,198 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33817 2024-11-11T04:03:48,201 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33817 2024-11-11T04:03:48,202 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33817 2024-11-11T04:03:48,207 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/388b5ced38b8,44323,1731297827171 2024-11-11T04:03:48,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:48,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:48,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:48,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:48,219 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/388b5ced38b8,44323,1731297827171 2024-11-11T04:03:48,223 DEBUG [M:0;388b5ced38b8:44323 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;388b5ced38b8:44323 2024-11-11T04:03:48,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:03:48,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:03:48,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:03:48,249 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:03:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:48,250 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:48,251 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:03:48,252 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/388b5ced38b8,44323,1731297827171 from backup master directory 2024-11-11T04:03:48,252 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:03:48,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, 
quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:48,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:48,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:48,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/388b5ced38b8,44323,1731297827171 2024-11-11T04:03:48,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:48,259 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:03:48,259 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=388b5ced38b8,44323,1731297827171 2024-11-11T04:03:48,261 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-11T04:03:48,262 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-11T04:03:48,339 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:48,340 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:48,345 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:47192 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775776_1003] {}] datanode.DataXceiver(331): 127.0.0.1:37915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47192 dst: /127.0.0.1:37915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:03:48,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775776_1004 (size=42) 2024-11-11T04:03:48,356 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T04:03:48,358 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/hbase.id with ID: 5e3c1111-ac3a-42ad-8649-00f1761b8c56 2024-11-11T04:03:48,409 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:48,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:48,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:48,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:48,449 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:48,466 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:48,467 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-11T04:03:48,472 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:48066 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775760_1005] {}] datanode.DataXceiver(331): 127.0.0.1:33265:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48066 dst: /127.0.0.1:33265 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:03:48,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775760_1006 (size=196) 2024-11-11T04:03:48,479 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
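Note on the repeated parity-block warnings above: the RS-3-2-1024k erasure coding policy needs five target datanodes (three data plus two parity), while this mini-cluster appears to expose only three (127.0.0.1:37915, 127.0.0.1:33265, and 127.0.0.1:42877 later in this log), so the last two parity blocks cannot be placed. Besides the 'hdfs ec -verifyClusterSetup' check the warning itself suggests, a minimal Java sketch along the following lines could inspect, and optionally clear, the policy on the test data directory. The namenode URI and path are copied from this log; switching the directory back to plain replication is only an assumption about how to quiet these warnings, not something this test does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class EcPolicyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:32935"); // namenode as reported earlier in this log
    Path testData = new Path("/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6");
    try (DistributedFileSystem dfs = (DistributedFileSystem) testData.getFileSystem(conf)) {
      // A null result means the directory uses plain replication rather than an EC policy.
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(testData);
      System.out.println("EC policy on " + testData + ": " + policy);
      if (policy != null) {
        // Assumption: with only 3 datanodes, replication avoids the unplaceable parity blocks.
        dfs.unsetErasureCodingPolicy(testData);
      }
    }
  }
}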
2024-11-11T04:03:48,494 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:03:48,496 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T04:03:48,501 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T04:03:48,530 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:48,530 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:48,534 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:48082 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775744_1007] {}] datanode.DataXceiver(331): 127.0.0.1:33265:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48082 dst: /127.0.0.1:33265 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:03:48,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775744_1008 (size=1189) 2024-11-11T04:03:48,543 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T04:03:48,560 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store 2024-11-11T04:03:48,572 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:48,573 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:48,576 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:47216 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775728_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47216 dst: /127.0.0.1:37915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:03:48,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775728_1010 (size=34) 2024-11-11T04:03:48,582 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T04:03:48,586 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-11T04:03:48,587 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:48,588 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:03:48,588 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:03:48,589 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:03:48,589 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-11T04:03:48,589 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:03:48,589 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:03:48,589 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-11T04:03:48,592 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/.initializing 2024-11-11T04:03:48,592 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/WALs/388b5ced38b8,44323,1731297827171 2024-11-11T04:03:48,599 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T04:03:48,611 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=388b5ced38b8%2C44323%2C1731297827171, suffix=, logDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/WALs/388b5ced38b8,44323,1731297827171, archiveDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/oldWALs, maxLogs=10 2024-11-11T04:03:48,639 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/WALs/388b5ced38b8,44323,1731297827171/388b5ced38b8%2C44323%2C1731297827171.1731297828616, exclude list is [], retry=0 2024-11-11T04:03:48,658 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] 
at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.initialize(FanOutOneBlockAsyncDFSOutputHelper.java:487) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper.access$300(FanOutOneBlockAsyncDFSOutputHelper.java:123) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$6.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:546) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputHelper$6.operationComplete(FanOutOneBlockAsyncDFSOutputHelper.java:541) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.util.NettyFutureUtils.lambda$addListener$0(NettyFutureUtils.java:56) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListener0(DefaultPromise.java:590) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners0(DefaultPromise.java:583) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListenersNow(DefaultPromise.java:559) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.notifyListeners(DefaultPromise.java:492) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setValue0(DefaultPromise.java:636) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.setSuccess0(DefaultPromise.java:625) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.DefaultPromise.trySuccess(DefaultPromise.java:105) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPromise.trySuccess(DefaultChannelPromise.java:84) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.fulfillConnectPromise(AbstractEpollChannel.java:658) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.finishConnect(AbstractEpollChannel.java:696) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollChannel$AbstractEpollUnsafe.epollOutReady(AbstractEpollChannel.java:567) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:491) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:03:48,659 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42877,DS-3bde0d25-7273-444a-9c25-0f16c6b7e7e7,DISK] 2024-11-11T04:03:48,659 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37915,DS-52edfe8f-1bf1-4dd2-9fc4-69e1211cdc38,DISK] 2024-11-11T04:03:48,659 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33265,DS-cb67300d-81ce-4568-9d8c-a531498b481c,DISK] 2024-11-11T04:03:48,662 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-11T04:03:48,700 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/WALs/388b5ced38b8,44323,1731297827171/388b5ced38b8%2C44323%2C1731297827171.1731297828616 2024-11-11T04:03:48,701 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34669:34669),(127.0.0.1/127.0.0.1:34679:34679),(127.0.0.1/127.0.0.1:34377:34377)] 2024-11-11T04:03:48,701 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:03:48,702 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:48,705 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:48,706 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:48,750 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:48,778 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T04:03:48,782 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:48,785 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:48,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:48,794 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T04:03:48,794 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:48,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:03:48,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:48,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T04:03:48,800 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:48,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:03:48,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:48,806 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T04:03:48,807 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:48,808 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:03:48,813 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:48,815 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:48,825 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
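On the FlushLargeStoresPolicy fallback just above: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set, the per-family flush lower bound defaults to the region memstore flush size divided by the number of families, which for master:store is 128 MB / 4 families = 32.0 M, matching the DEBUG message. A minimal sketch of pinning it explicitly in a table descriptor instead, using the property name exactly as it appears in the log, a hypothetical table, and an arbitrary 16 MB value; illustration only, not something this test configures.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundExample {
  public static void main(String[] args) {
    // Hypothetical table; the property name is copied from the DEBUG message above,
    // and 16 MB is an arbitrary example value for the per-family flush lower bound.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}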
2024-11-11T04:03:48,829 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:48,842 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:03:48,844 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69770730, jitterRate=0.039664894342422485}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T04:03:48,850 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-11T04:03:48,851 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T04:03:48,886 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78e0f112, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:03:48,925 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-11-11T04:03:48,941 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T04:03:48,941 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T04:03:48,945 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T04:03:48,946 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 2 msec 2024-11-11T04:03:48,954 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 6 msec 2024-11-11T04:03:48,954 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T04:03:48,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775788_1002 (size=7) 2024-11-11T04:03:48,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775789_1002 (size=7) 2024-11-11T04:03:48,988 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-11T04:03:49,005 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T04:03:49,040 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-11T04:03:49,044 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T04:03:49,048 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T04:03:49,057 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-11T04:03:49,060 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T04:03:49,065 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T04:03:49,074 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-11T04:03:49,075 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T04:03:49,082 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T04:03:49,093 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T04:03:49,099 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T04:03:49,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:49,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:49,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:49,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-11T04:03:49,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:49,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,110 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=388b5ced38b8,44323,1731297827171, sessionid=0x1012959f8750000, setting cluster-up flag (Was=false) 2024-11-11T04:03:49,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,157 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T04:03:49,159 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=388b5ced38b8,44323,1731297827171 2024-11-11T04:03:49,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,199 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T04:03:49,201 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=388b5ced38b8,44323,1731297827171 2024-11-11T04:03:49,220 DEBUG [RS:1;388b5ced38b8:44251 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;388b5ced38b8:44251 2024-11-11T04:03:49,220 DEBUG [RS:0;388b5ced38b8:43881 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;388b5ced38b8:43881 2024-11-11T04:03:49,221 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1008): ClusterId : 5e3c1111-ac3a-42ad-8649-00f1761b8c56 2024-11-11T04:03:49,221 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1008): ClusterId : 5e3c1111-ac3a-42ad-8649-00f1761b8c56 2024-11-11T04:03:49,222 DEBUG [RS:2;388b5ced38b8:33817 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;388b5ced38b8:33817 2024-11-11T04:03:49,224 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:03:49,224 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:03:49,224 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1008): ClusterId : 5e3c1111-ac3a-42ad-8649-00f1761b8c56 2024-11-11T04:03:49,224 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:03:49,244 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:03:49,245 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:03:49,245 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:03:49,245 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:03:49,245 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:03:49,246 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:03:49,258 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:03:49,258 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:03:49,258 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:03:49,259 DEBUG [RS:2;388b5ced38b8:33817 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c1b1e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-11-11T04:03:49,259 DEBUG [RS:0;388b5ced38b8:43881 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f16707b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:03:49,261 DEBUG [RS:0;388b5ced38b8:43881 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9ddda9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=388b5ced38b8/172.17.0.2:0 2024-11-11T04:03:49,261 DEBUG [RS:2;388b5ced38b8:33817 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58b48572, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=388b5ced38b8/172.17.0.2:0 2024-11-11T04:03:49,262 DEBUG [RS:1;388b5ced38b8:44251 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d226fd2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:03:49,263 DEBUG [RS:1;388b5ced38b8:44251 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@84ab9d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=388b5ced38b8/172.17.0.2:0 2024-11-11T04:03:49,265 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-11T04:03:49,265 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-11T04:03:49,265 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-11T04:03:49,265 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-11T04:03:49,265 DEBUG [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-11T04:03:49,265 DEBUG [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-11T04:03:49,265 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-11T04:03:49,265 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-11T04:03:49,265 DEBUG [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-11-11T04:03:49,272 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(3073): reportForDuty to master=388b5ced38b8,44323,1731297827171 with isa=388b5ced38b8/172.17.0.2:43881, startcode=1731297828046 2024-11-11T04:03:49,272 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(3073): reportForDuty to master=388b5ced38b8,44323,1731297827171 with isa=388b5ced38b8/172.17.0.2:33817, startcode=1731297828174 2024-11-11T04:03:49,274 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(3073): reportForDuty to master=388b5ced38b8,44323,1731297827171 with isa=388b5ced38b8/172.17.0.2:44251, startcode=1731297828114 2024-11-11T04:03:49,287 DEBUG [RS:1;388b5ced38b8:44251 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:03:49,287 DEBUG [RS:0;388b5ced38b8:43881 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:03:49,287 DEBUG [RS:2;388b5ced38b8:33817 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:03:49,327 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-11T04:03:49,330 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41233, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:03:49,330 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48865, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:03:49,331 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54585, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:03:49,335 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-11T04:03:49,337 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44323 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T04:03:49,339 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T04:03:49,343 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44323 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T04:03:49,344 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44323 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T04:03:49,347 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 388b5ced38b8,44323,1731297827171 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T04:03:49,352 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/388b5ced38b8:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:03:49,352 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/388b5ced38b8:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:03:49,352 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/388b5ced38b8:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:03:49,352 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/388b5ced38b8:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:03:49,353 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/388b5ced38b8:0, corePoolSize=10, maxPoolSize=10 2024-11-11T04:03:49,353 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,353 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/388b5ced38b8:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:03:49,353 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,368 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:03:49,369 DEBUG [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-11-11T04:03:49,369 DEBUG [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-11-11T04:03:49,369 DEBUG [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-11-11T04:03:49,369 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-11T04:03:49,369 WARN [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 
2024-11-11T04:03:49,369 WARN [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-11T04:03:49,369 WARN [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-11T04:03:49,370 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731297859370 2024-11-11T04:03:49,372 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T04:03:49,374 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T04:03:49,374 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:49,374 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T04:03:49,378 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T04:03:49,379 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T04:03:49,379 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T04:03:49,380 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T04:03:49,389 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,393 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:49,393 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:49,394 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T04:03:49,396 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T04:03:49,397 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T04:03:49,400 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T04:03:49,402 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T04:03:49,404 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.large.0-1731297829403,5,FailOnTimeoutGroup] 2024-11-11T04:03:49,405 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:47244 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775712_1012] {}] datanode.DataXceiver(331): 127.0.0.1:37915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47244 dst: /127.0.0.1:37915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T04:03:49,406 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.small.0-1731297829404,5,FailOnTimeoutGroup] 2024-11-11T04:03:49,407 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,407 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T04:03:49,408 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,409 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775712_1013 (size=1039) 2024-11-11T04:03:49,424 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T04:03:49,425 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-11T04:03:49,426 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6 2024-11-11T04:03:49,439 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-11T04:03:49,439 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:49,448 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:47256 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775696_1014] {}] datanode.DataXceiver(331): 127.0.0.1:37915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47256 dst: /127.0.0.1:37915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:03:49,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775696_1015 (size=32) 2024-11-11T04:03:49,454 WARN [PEWorker-1 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-11T04:03:49,455 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:49,458 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:03:49,461 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:03:49,461 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:49,462 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:49,463 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:03:49,465 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:03:49,466 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:49,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:49,467 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:03:49,470 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:03:49,470 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(3073): reportForDuty to master=388b5ced38b8,44323,1731297827171 with isa=388b5ced38b8/172.17.0.2:43881, startcode=1731297828046 2024-11-11T04:03:49,470 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(3073): reportForDuty to master=388b5ced38b8,44323,1731297827171 with isa=388b5ced38b8/172.17.0.2:33817, startcode=1731297828174 2024-11-11T04:03:49,470 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(3073): reportForDuty to master=388b5ced38b8,44323,1731297827171 with isa=388b5ced38b8/172.17.0.2:44251, startcode=1731297828114 2024-11-11T04:03:49,470 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:49,472 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:49,472 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44323 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 388b5ced38b8,33817,1731297828174 2024-11-11T04:03:49,475 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44323 {}] master.ServerManager(486): Registering regionserver=388b5ced38b8,33817,1731297828174 2024-11-11T04:03:49,475 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740 2024-11-11T04:03:49,476 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740 2024-11-11T04:03:49,480 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-11-11T04:03:49,483 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-11T04:03:49,485 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44323 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 388b5ced38b8,43881,1731297828046 2024-11-11T04:03:49,485 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44323 {}] master.ServerManager(486): Registering regionserver=388b5ced38b8,43881,1731297828046 2024-11-11T04:03:49,486 DEBUG [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6 2024-11-11T04:03:49,486 DEBUG [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:32935 2024-11-11T04:03:49,486 DEBUG [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-11T04:03:49,491 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44323 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 388b5ced38b8,44251,1731297828114 2024-11-11T04:03:49,492 DEBUG [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6 2024-11-11T04:03:49,492 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44323 {}] master.ServerManager(486): Registering regionserver=388b5ced38b8,44251,1731297828114 2024-11-11T04:03:49,492 DEBUG [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:32935 2024-11-11T04:03:49,492 DEBUG [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-11T04:03:49,495 DEBUG [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6 2024-11-11T04:03:49,495 DEBUG [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:32935 2024-11-11T04:03:49,495 DEBUG [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-11T04:03:49,497 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:03:49,498 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68479679, jitterRate=0.020426735281944275}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T04:03:49,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:03:49,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-11T04:03:49,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:03:49,504 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region 
hbase:meta,,1.1588230740 2024-11-11T04:03:49,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-11T04:03:49,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:03:49,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:03:49,505 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-11T04:03:49,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-11T04:03:49,508 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:03:49,508 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-11T04:03:49,514 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T04:03:49,523 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:03:49,524 DEBUG [RS:2;388b5ced38b8:33817 {}] zookeeper.ZKUtil(111): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/388b5ced38b8,33817,1731297828174 2024-11-11T04:03:49,524 WARN [RS:2;388b5ced38b8:33817 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:03:49,525 DEBUG [RS:0;388b5ced38b8:43881 {}] zookeeper.ZKUtil(111): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/388b5ced38b8,43881,1731297828046 2024-11-11T04:03:49,525 INFO [RS:2;388b5ced38b8:33817 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T04:03:49,525 WARN [RS:0;388b5ced38b8:43881 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T04:03:49,525 INFO [RS:0;388b5ced38b8:43881 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T04:03:49,525 DEBUG [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,43881,1731297828046 2024-11-11T04:03:49,525 DEBUG [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,33817,1731297828174 2024-11-11T04:03:49,526 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [388b5ced38b8,44251,1731297828114] 2024-11-11T04:03:49,527 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [388b5ced38b8,43881,1731297828046] 2024-11-11T04:03:49,527 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [388b5ced38b8,33817,1731297828174] 2024-11-11T04:03:49,527 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T04:03:49,527 DEBUG [RS:1;388b5ced38b8:44251 {}] zookeeper.ZKUtil(111): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/388b5ced38b8,44251,1731297828114 2024-11-11T04:03:49,527 WARN [RS:1;388b5ced38b8:44251 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T04:03:49,527 INFO [RS:1;388b5ced38b8:44251 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T04:03:49,528 DEBUG [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,44251,1731297828114 2024-11-11T04:03:49,542 DEBUG [RS:0;388b5ced38b8:43881 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-11T04:03:49,542 DEBUG [RS:1;388b5ced38b8:44251 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-11T04:03:49,551 DEBUG [RS:2;388b5ced38b8:33817 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-11T04:03:49,556 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:03:49,556 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:03:49,556 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:03:49,577 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:03:49,577 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:03:49,577 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:03:49,580 INFO [RS:2;388b5ced38b8:33817 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:03:49,580 INFO [RS:1;388b5ced38b8:44251 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:03:49,580 INFO [RS:0;388b5ced38b8:43881 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:03:49,580 INFO [RS:2;388b5ced38b8:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,580 INFO [RS:1;388b5ced38b8:44251 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,580 INFO [RS:0;388b5ced38b8:43881 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T04:03:49,581 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-11T04:03:49,582 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-11T04:03:49,584 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-11T04:03:49,588 INFO [RS:0;388b5ced38b8:43881 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,588 INFO [RS:1;388b5ced38b8:44251 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,588 INFO [RS:2;388b5ced38b8:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,589 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,589 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG 
[RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/388b5ced38b8:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:03:49,590 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/388b5ced38b8:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:03:49,590 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/388b5ced38b8:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:03:49,590 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service 
name=RS_SWITCH_RPC_THROTTLE-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,590 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:49,591 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:49,591 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:49,591 DEBUG [RS:1;388b5ced38b8:44251 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:49,591 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:49,591 DEBUG [RS:2;388b5ced38b8:33817 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:49,591 DEBUG [RS:0;388b5ced38b8:43881 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:49,595 INFO [RS:2;388b5ced38b8:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,595 INFO [RS:1;388b5ced38b8:44251 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,595 INFO [RS:0;388b5ced38b8:43881 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,595 INFO [RS:1;388b5ced38b8:44251 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,595 INFO [RS:2;388b5ced38b8:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,595 INFO [RS:0;388b5ced38b8:43881 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,595 INFO [RS:1;388b5ced38b8:44251 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,595 INFO [RS:0;388b5ced38b8:43881 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,596 INFO [RS:1;388b5ced38b8:44251 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,596 INFO [RS:2;388b5ced38b8:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
2024-11-11T04:03:49,596 INFO [RS:0;388b5ced38b8:43881 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,596 INFO [RS:1;388b5ced38b8:44251 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,44251,1731297828114-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:03:49,596 INFO [RS:0;388b5ced38b8:43881 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,43881,1731297828046-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:03:49,596 INFO [RS:2;388b5ced38b8:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,596 INFO [RS:2;388b5ced38b8:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,33817,1731297828174-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:03:49,617 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:03:49,617 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:03:49,617 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:03:49,619 INFO [RS:0;388b5ced38b8:43881 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,43881,1731297828046-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,619 INFO [RS:1;388b5ced38b8:44251 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,44251,1731297828114-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:49,619 INFO [RS:2;388b5ced38b8:33817 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,33817,1731297828174-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T04:03:49,643 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.Replication(204): 388b5ced38b8,33817,1731297828174 started 2024-11-11T04:03:49,643 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.Replication(204): 388b5ced38b8,43881,1731297828046 started 2024-11-11T04:03:49,643 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1767): Serving as 388b5ced38b8,33817,1731297828174, RpcServer on 388b5ced38b8/172.17.0.2:33817, sessionid=0x1012959f8750003 2024-11-11T04:03:49,643 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.Replication(204): 388b5ced38b8,44251,1731297828114 started 2024-11-11T04:03:49,643 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1767): Serving as 388b5ced38b8,43881,1731297828046, RpcServer on 388b5ced38b8/172.17.0.2:43881, sessionid=0x1012959f8750001 2024-11-11T04:03:49,643 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1767): Serving as 388b5ced38b8,44251,1731297828114, RpcServer on 388b5ced38b8/172.17.0.2:44251, sessionid=0x1012959f8750002 2024-11-11T04:03:49,644 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:03:49,644 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:03:49,644 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:03:49,644 DEBUG [RS:0;388b5ced38b8:43881 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 388b5ced38b8,43881,1731297828046 2024-11-11T04:03:49,644 DEBUG [RS:2;388b5ced38b8:33817 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 388b5ced38b8,33817,1731297828174 2024-11-11T04:03:49,644 DEBUG [RS:1;388b5ced38b8:44251 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 388b5ced38b8,44251,1731297828114 2024-11-11T04:03:49,644 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,43881,1731297828046' 2024-11-11T04:03:49,644 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,44251,1731297828114' 2024-11-11T04:03:49,644 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,33817,1731297828174' 2024-11-11T04:03:49,644 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:03:49,644 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:03:49,644 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:03:49,645 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:03:49,645 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:03:49,645 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:03:49,645 DEBUG 
[RS:2;388b5ced38b8:33817 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:03:49,645 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:03:49,645 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:03:49,645 DEBUG [RS:2;388b5ced38b8:33817 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 388b5ced38b8,33817,1731297828174 2024-11-11T04:03:49,645 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:03:49,645 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:03:49,645 DEBUG [RS:0;388b5ced38b8:43881 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 388b5ced38b8,43881,1731297828046 2024-11-11T04:03:49,645 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,33817,1731297828174' 2024-11-11T04:03:49,645 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:03:49,645 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,43881,1731297828046' 2024-11-11T04:03:49,646 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:03:49,646 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:03:49,646 DEBUG [RS:1;388b5ced38b8:44251 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 388b5ced38b8,44251,1731297828114 2024-11-11T04:03:49,646 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,44251,1731297828114' 2024-11-11T04:03:49,646 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:03:49,646 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:03:49,646 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:03:49,646 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:03:49,647 DEBUG [RS:0;388b5ced38b8:43881 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:03:49,647 DEBUG [RS:2;388b5ced38b8:33817 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:03:49,647 INFO [RS:0;388b5ced38b8:43881 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:03:49,647 DEBUG [RS:1;388b5ced38b8:44251 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:03:49,647 INFO [RS:2;388b5ced38b8:33817 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:03:49,647 INFO [RS:1;388b5ced38b8:44251 {}] quotas.RegionServerRpcQuotaManager(64): 
Quota support disabled 2024-11-11T04:03:49,647 INFO [RS:0;388b5ced38b8:43881 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:03:49,647 INFO [RS:2;388b5ced38b8:33817 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:03:49,647 INFO [RS:1;388b5ced38b8:44251 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:03:49,678 WARN [388b5ced38b8:44323 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-11-11T04:03:49,751 INFO [RS:0;388b5ced38b8:43881 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T04:03:49,751 INFO [RS:2;388b5ced38b8:33817 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T04:03:49,751 INFO [RS:1;388b5ced38b8:44251 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T04:03:49,755 INFO [RS:2;388b5ced38b8:33817 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=388b5ced38b8%2C33817%2C1731297828174, suffix=, logDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,33817,1731297828174, archiveDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/oldWALs, maxLogs=32 2024-11-11T04:03:49,755 INFO [RS:1;388b5ced38b8:44251 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=388b5ced38b8%2C44251%2C1731297828114, suffix=, logDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,44251,1731297828114, archiveDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/oldWALs, maxLogs=32 2024-11-11T04:03:49,760 INFO [RS:0;388b5ced38b8:43881 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=388b5ced38b8%2C43881%2C1731297828046, suffix=, logDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,43881,1731297828046, archiveDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/oldWALs, maxLogs=32 2024-11-11T04:03:49,774 DEBUG [RS:2;388b5ced38b8:33817 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,33817,1731297828174/388b5ced38b8%2C33817%2C1731297828174.1731297829758, exclude list is [], retry=0 2024-11-11T04:03:49,779 DEBUG [RS:1;388b5ced38b8:44251 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,44251,1731297828114/388b5ced38b8%2C44251%2C1731297828114.1731297829758, exclude list is [], retry=0 2024-11-11T04:03:49,783 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37915,DS-52edfe8f-1bf1-4dd2-9fc4-69e1211cdc38,DISK] 2024-11-11T04:03:49,783 DEBUG [RS:0;388b5ced38b8:43881 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for 
/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,43881,1731297828046/388b5ced38b8%2C43881%2C1731297828046.1731297829762, exclude list is [], retry=0 2024-11-11T04:03:49,784 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33265,DS-cb67300d-81ce-4568-9d8c-a531498b481c,DISK] 2024-11-11T04:03:49,784 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42877,DS-3bde0d25-7273-444a-9c25-0f16c6b7e7e7,DISK] 2024-11-11T04:03:49,820 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42877,DS-3bde0d25-7273-444a-9c25-0f16c6b7e7e7,DISK] 2024-11-11T04:03:49,820 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37915,DS-52edfe8f-1bf1-4dd2-9fc4-69e1211cdc38,DISK] 2024-11-11T04:03:49,820 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33265,DS-cb67300d-81ce-4568-9d8c-a531498b481c,DISK] 2024-11-11T04:03:49,821 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37915,DS-52edfe8f-1bf1-4dd2-9fc4-69e1211cdc38,DISK] 2024-11-11T04:03:49,822 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33265,DS-cb67300d-81ce-4568-9d8c-a531498b481c,DISK] 2024-11-11T04:03:49,822 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42877,DS-3bde0d25-7273-444a-9c25-0f16c6b7e7e7,DISK] 2024-11-11T04:03:49,824 INFO [RS:2;388b5ced38b8:33817 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,33817,1731297828174/388b5ced38b8%2C33817%2C1731297828174.1731297829758 2024-11-11T04:03:49,824 DEBUG [RS:2;388b5ced38b8:33817 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34669:34669),(127.0.0.1/127.0.0.1:34377:34377),(127.0.0.1/127.0.0.1:34679:34679)] 2024-11-11T04:03:49,829 INFO [RS:0;388b5ced38b8:43881 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,43881,1731297828046/388b5ced38b8%2C43881%2C1731297828046.1731297829762 2024-11-11T04:03:49,829 INFO [RS:1;388b5ced38b8:44251 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,44251,1731297828114/388b5ced38b8%2C44251%2C1731297828114.1731297829758 2024-11-11T04:03:49,831 DEBUG 
[RS:0;388b5ced38b8:43881 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34679:34679),(127.0.0.1/127.0.0.1:34377:34377),(127.0.0.1/127.0.0.1:34669:34669)] 2024-11-11T04:03:49,835 DEBUG [RS:1;388b5ced38b8:44251 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34377:34377),(127.0.0.1/127.0.0.1:34669:34669),(127.0.0.1/127.0.0.1:34679:34679)] 2024-11-11T04:03:49,929 DEBUG [388b5ced38b8:44323 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-11T04:03:49,932 DEBUG [388b5ced38b8:44323 {}] balancer.BalancerClusterState(202): Hosts are {388b5ced38b8=0} racks are {/default-rack=0} 2024-11-11T04:03:49,939 DEBUG [388b5ced38b8:44323 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:03:49,939 DEBUG [388b5ced38b8:44323 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-11-11T04:03:49,939 DEBUG [388b5ced38b8:44323 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-11-11T04:03:49,939 INFO [388b5ced38b8:44323 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:03:49,940 INFO [388b5ced38b8:44323 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:03:49,940 INFO [388b5ced38b8:44323 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:03:49,940 DEBUG [388b5ced38b8:44323 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T04:03:49,945 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=388b5ced38b8,44251,1731297828114 2024-11-11T04:03:49,950 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 388b5ced38b8,44251,1731297828114, state=OPENING 2024-11-11T04:03:49,965 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T04:03:49,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:49,975 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:49,975 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:49,975 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 
2024-11-11T04:03:49,975 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:49,977 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=388b5ced38b8,44251,1731297828114}] 2024-11-11T04:03:50,151 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 388b5ced38b8,44251,1731297828114 2024-11-11T04:03:50,154 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:03:50,157 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38468, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:03:50,169 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-11T04:03:50,169 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T04:03:50,170 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-11T04:03:50,173 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=388b5ced38b8%2C44251%2C1731297828114.meta, suffix=.meta, logDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,44251,1731297828114, archiveDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/oldWALs, maxLogs=32 2024-11-11T04:03:50,187 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,44251,1731297828114/388b5ced38b8%2C44251%2C1731297828114.meta.1731297830175.meta, exclude list is [], retry=0 2024-11-11T04:03:50,191 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42877,DS-3bde0d25-7273-444a-9c25-0f16c6b7e7e7,DISK] 2024-11-11T04:03:50,191 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:37915,DS-52edfe8f-1bf1-4dd2-9fc4-69e1211cdc38,DISK] 2024-11-11T04:03:50,191 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33265,DS-cb67300d-81ce-4568-9d8c-a531498b481c,DISK] 2024-11-11T04:03:50,194 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/WALs/388b5ced38b8,44251,1731297828114/388b5ced38b8%2C44251%2C1731297828114.meta.1731297830175.meta 2024-11-11T04:03:50,195 DEBUG 
[RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34669:34669),(127.0.0.1/127.0.0.1:34377:34377),(127.0.0.1/127.0.0.1:34679:34679)] 2024-11-11T04:03:50,195 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:03:50,196 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T04:03:50,246 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T04:03:50,250 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-11T04:03:50,253 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T04:03:50,254 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:50,254 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-11T04:03:50,254 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-11T04:03:50,257 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:03:50,258 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:03:50,258 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:50,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:50,259 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:03:50,261 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:03:50,261 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:50,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:50,262 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:03:50,263 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:03:50,263 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:50,264 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:50,266 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740 2024-11-11T04:03:50,268 
DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740 2024-11-11T04:03:50,271 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-11-11T04:03:50,274 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-11T04:03:50,276 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70906564, jitterRate=0.056590139865875244}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T04:03:50,277 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-11T04:03:50,283 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731297830145 2024-11-11T04:03:50,293 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T04:03:50,293 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-11T04:03:50,294 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=388b5ced38b8,44251,1731297828114 2024-11-11T04:03:50,296 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 388b5ced38b8,44251,1731297828114, state=OPEN 2024-11-11T04:03:50,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:03:50,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:03:50,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:03:50,349 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:50,349 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:50,349 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): 
Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:50,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:03:50,350 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:50,359 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T04:03:50,359 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=388b5ced38b8,44251,1731297828114 in 372 msec 2024-11-11T04:03:50,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T04:03:50,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 846 msec 2024-11-11T04:03:50,371 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.1230 sec 2024-11-11T04:03:50,371 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731297830371, completionTime=-1 2024-11-11T04:03:50,371 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-11T04:03:50,371 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-11T04:03:50,401 DEBUG [hconnection-0x5ff0a420-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:03:50,403 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:03:50,412 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-11-11T04:03:50,412 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731297890412 2024-11-11T04:03:50,412 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731297950412 2024-11-11T04:03:50,412 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 41 msec 2024-11-11T04:03:50,442 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-11T04:03:50,448 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,44323,1731297827171-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T04:03:50,448 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,44323,1731297827171-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:50,448 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,44323,1731297827171-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:50,449 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-388b5ced38b8:44323, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:50,450 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:50,455 DEBUG [master/388b5ced38b8:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-11T04:03:50,457 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 2024-11-11T04:03:50,459 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T04:03:50,483 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-11T04:03:50,487 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T04:03:50,488 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:50,490 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T04:03:50,500 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:50,500 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
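Note on the two DFSStripedOutputStream warnings above: the RS-3-2-1024k policy is Reed-Solomon with 3 data blocks plus 2 parity blocks per group, so a full block group needs 5 datanode targets, while only three datanodes (127.0.0.1:37915, 127.0.0.1:33265, 127.0.0.1:42877) appear in the pipelines earlier in this log; that is why parity blocks at index=3 and index=4 cannot be allocated. A minimal sketch, assuming a Hadoop 3.x client on the classpath, of how one might confirm which erasure coding policy applies to the test data directory (NameNode address and directory are the ones reported in the WAL configuration entries above; they are specific to this run):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;

public class CheckEcPolicy {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode seen in this log; adjust for any other run.
    conf.set("fs.defaultFS", "hdfs://localhost:32935");
    Path dir = new Path("/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6");
    try (DistributedFileSystem dfs = (DistributedFileSystem) dir.getFileSystem(conf)) {
      // null means the directory is plainly replicated rather than erasure coded.
      ErasureCodingPolicy policy = dfs.getErasureCodingPolicy(dir);
      System.out.println(policy == null ? "replication" : policy.getName());
    }
  }
}

The command-line equivalent is roughly 'hdfs ec -getPolicy -path <dir>', and 'hdfs ec -verifyClusterSetup' (quoted verbatim in the warning) reports whether the currently enabled policies are supported by the datanode/rack topology.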
2024-11-11T04:03:50,503 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:49198 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775680_1020] {}] datanode.DataXceiver(331): 127.0.0.1:42877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49198 dst: /127.0.0.1:42877
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T04:03:50,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775680_1021 (size=358)
2024-11-11T04:03:50,912 WARN [PEWorker-3 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-11T04:03:50,915 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => bd9f4f7b689140ea375446d6966edfe1, NAME => 'hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6
2024-11-11T04:03:50,920 WARN [RegionOpenAndInit-hbase:namespace-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T04:03:50,920 WARN [RegionOpenAndInit-hbase:namespace-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T04:03:50,922 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:49222 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775664_1022] {}] datanode.DataXceiver(331): 127.0.0.1:42877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49222 dst: /127.0.0.1:42877
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T04:03:50,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775664_1023 (size=42)
2024-11-11T04:03:50,927 WARN [RegionOpenAndInit-hbase:namespace-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-11T04:03:50,928 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-11-11T04:03:50,928 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing bd9f4f7b689140ea375446d6966edfe1, disabling compactions & flushes
2024-11-11T04:03:50,928 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.
2024-11-11T04:03:50,928 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.
2024-11-11T04:03:50,928 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1. after waiting 0 ms
2024-11-11T04:03:50,928 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.
2024-11-11T04:03:50,928 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.
2024-11-11T04:03:50,928 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for bd9f4f7b689140ea375446d6966edfe1: 2024-11-11T04:03:50,930 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T04:03:50,935 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1731297830931"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731297830931"}]},"ts":"1731297830931"} 2024-11-11T04:03:50,953 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-11T04:03:50,956 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T04:03:50,958 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731297830956"}]},"ts":"1731297830956"} 2024-11-11T04:03:50,963 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-11T04:03:51,007 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {388b5ced38b8=0} racks are {/default-rack=0} 2024-11-11T04:03:51,008 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:03:51,008 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-11-11T04:03:51,008 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-11-11T04:03:51,009 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:03:51,009 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:03:51,009 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:03:51,009 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T04:03:51,010 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=bd9f4f7b689140ea375446d6966edfe1, ASSIGN}] 2024-11-11T04:03:51,013 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=bd9f4f7b689140ea375446d6966edfe1, ASSIGN 2024-11-11T04:03:51,015 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=bd9f4f7b689140ea375446d6966edfe1, ASSIGN; state=OFFLINE, location=388b5ced38b8,43881,1731297828046; forceNewPlan=false, retain=false 2024-11-11T04:03:51,168 INFO [388b5ced38b8:44323 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-11T04:03:51,169 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=bd9f4f7b689140ea375446d6966edfe1, regionState=OPENING, regionLocation=388b5ced38b8,43881,1731297828046 2024-11-11T04:03:51,176 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure bd9f4f7b689140ea375446d6966edfe1, server=388b5ced38b8,43881,1731297828046}] 2024-11-11T04:03:51,331 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 388b5ced38b8,43881,1731297828046 2024-11-11T04:03:51,331 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:03:51,333 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58450, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:03:51,340 INFO [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1. 2024-11-11T04:03:51,341 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => bd9f4f7b689140ea375446d6966edfe1, NAME => 'hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:03:51,341 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace bd9f4f7b689140ea375446d6966edfe1 2024-11-11T04:03:51,341 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:51,342 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for bd9f4f7b689140ea375446d6966edfe1 2024-11-11T04:03:51,342 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for bd9f4f7b689140ea375446d6966edfe1 2024-11-11T04:03:51,344 INFO [StoreOpener-bd9f4f7b689140ea375446d6966edfe1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region bd9f4f7b689140ea375446d6966edfe1 2024-11-11T04:03:51,347 INFO [StoreOpener-bd9f4f7b689140ea375446d6966edfe1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for 
minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bd9f4f7b689140ea375446d6966edfe1 columnFamilyName info 2024-11-11T04:03:51,347 DEBUG [StoreOpener-bd9f4f7b689140ea375446d6966edfe1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:51,348 INFO [StoreOpener-bd9f4f7b689140ea375446d6966edfe1-1 {}] regionserver.HStore(327): Store=bd9f4f7b689140ea375446d6966edfe1/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:03:51,350 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/namespace/bd9f4f7b689140ea375446d6966edfe1 2024-11-11T04:03:51,351 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/namespace/bd9f4f7b689140ea375446d6966edfe1 2024-11-11T04:03:51,355 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for bd9f4f7b689140ea375446d6966edfe1 2024-11-11T04:03:51,363 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/namespace/bd9f4f7b689140ea375446d6966edfe1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:03:51,363 INFO [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened bd9f4f7b689140ea375446d6966edfe1; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69197910, jitterRate=0.03112921118736267}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T04:03:51,364 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for bd9f4f7b689140ea375446d6966edfe1: 2024-11-11T04:03:51,366 INFO [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1., pid=6, masterSystemTime=1731297831331 2024-11-11T04:03:51,369 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1. 2024-11-11T04:03:51,369 INFO [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1. 
2024-11-11T04:03:51,371 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=bd9f4f7b689140ea375446d6966edfe1, regionState=OPEN, openSeqNum=2, regionLocation=388b5ced38b8,43881,1731297828046 2024-11-11T04:03:51,379 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T04:03:51,381 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure bd9f4f7b689140ea375446d6966edfe1, server=388b5ced38b8,43881,1731297828046 in 199 msec 2024-11-11T04:03:51,385 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T04:03:51,385 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=bd9f4f7b689140ea375446d6966edfe1, ASSIGN in 369 msec 2024-11-11T04:03:51,387 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T04:03:51,388 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731297831387"}]},"ts":"1731297831387"} 2024-11-11T04:03:51,390 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-11T04:03:51,434 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T04:03:51,439 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 974 msec 2024-11-11T04:03:51,488 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-11T04:03:51,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-11T04:03:51,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:51,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:51,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:51,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:51,519 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:03:51,521 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58462, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:03:51,530 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-11T04:03:51,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-11T04:03:51,562 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 35 msec 2024-11-11T04:03:51,574 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-11T04:03:51,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-11T04:03:51,602 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 27 msec 2024-11-11T04:03:51,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-11T04:03:51,640 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-11T04:03:51,641 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.381sec 2024-11-11T04:03:51,642 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T04:03:51,643 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T04:03:51,644 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T04:03:51,645 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T04:03:51,645 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T04:03:51,646 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,44323,1731297827171-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-11T04:03:51,646 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,44323,1731297827171-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T04:03:51,649 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-11T04:03:51,649 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T04:03:51,650 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,44323,1731297827171-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:51,733 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2bca0ec7 to 127.0.0.1:57522 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a4d3e03 2024-11-11T04:03:51,734 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-11T04:03:51,751 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@600c1851, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:03:51,757 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-11T04:03:51,757 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-11T04:03:51,770 DEBUG [hconnection-0x7b38de72-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:03:51,781 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38492, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:03:51,792 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=388b5ced38b8,44323,1731297827171 2024-11-11T04:03:51,802 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T04:03:51,833 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T04:03:51,852 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:03:51,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-11T04:03:51,863 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, 
state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T04:03:51,863 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:51,864 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 9 2024-11-11T04:03:51,868 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T04:03:51,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T04:03:51,897 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:51,898 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:51,924 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:47304 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775648_1024] {}] datanode.DataXceiver(331): 127.0.0.1:37915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47304 dst: /127.0.0.1:37915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T04:03:51,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T04:03:51,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775773_1004 (size=42) 2024-11-11T04:03:51,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775772_1004 (size=42) 2024-11-11T04:03:52,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775756_1006 (size=196) 2024-11-11T04:03:52,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775708_1013 (size=1039) 2024-11-11T04:03:52,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775648_1025 (size=392) 2024-11-11T04:03:52,019 WARN [PEWorker-2 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T04:03:52,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775757_1006 (size=196) 2024-11-11T04:03:52,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775725_1010 (size=34) 2024-11-11T04:03:52,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775709_1013 (size=1039) 2024-11-11T04:03:52,052 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 363b9af670907cd383cac5c3431a120a, NAME => 'TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6 2024-11-11T04:03:52,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775741_1008 (size=1189) 2024-11-11T04:03:52,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775724_1010 (size=34) 2024-11-11T04:03:52,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775693_1015 (size=32) 2024-11-11T04:03:52,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775740_1008 (size=1189) 2024-11-11T04:03:52,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775692_1015 (size=32) 
2024-11-11T04:03:52,095 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:52,095 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:52,112 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:47370 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775632_1026] {}] datanode.DataXceiver(331): 127.0.0.1:37915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47370 dst: /127.0.0.1:37915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:03:52,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775632_1027 (size=51) 2024-11-11T04:03:52,126 WARN [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T04:03:52,127 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(894): Instantiated TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:52,127 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1681): Closing 363b9af670907cd383cac5c3431a120a, disabling compactions & flushes 2024-11-11T04:03:52,127 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1703): Closing region TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 
2024-11-11T04:03:52,127 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 2024-11-11T04:03:52,127 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. after waiting 0 ms 2024-11-11T04:03:52,127 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 2024-11-11T04:03:52,128 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1922): Closed TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 2024-11-11T04:03:52,128 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1635): Region close journal for 363b9af670907cd383cac5c3431a120a: 2024-11-11T04:03:52,130 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T04:03:52,131 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731297832130"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731297832130"}]},"ts":"1731297832130"} 2024-11-11T04:03:52,144 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-11T04:03:52,150 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T04:03:52,151 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731297832150"}]},"ts":"1731297832150"} 2024-11-11T04:03:52,156 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-11T04:03:52,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T04:03:52,207 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {388b5ced38b8=0} racks are {/default-rack=0} 2024-11-11T04:03:52,209 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:03:52,210 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-11-11T04:03:52,210 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-11-11T04:03:52,210 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:03:52,210 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:03:52,210 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:03:52,210 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T04:03:52,210 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=TestHBaseWalOnEC, region=363b9af670907cd383cac5c3431a120a, ASSIGN}] 2024-11-11T04:03:52,215 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=363b9af670907cd383cac5c3431a120a, ASSIGN 2024-11-11T04:03:52,218 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=363b9af670907cd383cac5c3431a120a, ASSIGN; state=OFFLINE, location=388b5ced38b8,33817,1731297828174; forceNewPlan=false, retain=false 2024-11-11T04:03:52,369 INFO [388b5ced38b8:44323 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-11T04:03:52,369 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=363b9af670907cd383cac5c3431a120a, regionState=OPENING, regionLocation=388b5ced38b8,33817,1731297828174 2024-11-11T04:03:52,374 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 363b9af670907cd383cac5c3431a120a, server=388b5ced38b8,33817,1731297828174}] 2024-11-11T04:03:52,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T04:03:52,529 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 388b5ced38b8,33817,1731297828174 2024-11-11T04:03:52,529 DEBUG [RSProcedureDispatcher-pool-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:03:52,531 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45002, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:03:52,540 INFO [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 
2024-11-11T04:03:52,540 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 363b9af670907cd383cac5c3431a120a, NAME => 'TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:03:52,541 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 363b9af670907cd383cac5c3431a120a 2024-11-11T04:03:52,541 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:52,541 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 363b9af670907cd383cac5c3431a120a 2024-11-11T04:03:52,541 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 363b9af670907cd383cac5c3431a120a 2024-11-11T04:03:52,544 INFO [StoreOpener-363b9af670907cd383cac5c3431a120a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 363b9af670907cd383cac5c3431a120a 2024-11-11T04:03:52,547 INFO [StoreOpener-363b9af670907cd383cac5c3431a120a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 363b9af670907cd383cac5c3431a120a columnFamilyName cf 2024-11-11T04:03:52,547 DEBUG [StoreOpener-363b9af670907cd383cac5c3431a120a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:52,549 INFO [StoreOpener-363b9af670907cd383cac5c3431a120a-1 {}] regionserver.HStore(327): Store=363b9af670907cd383cac5c3431a120a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:03:52,550 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/default/TestHBaseWalOnEC/363b9af670907cd383cac5c3431a120a 2024-11-11T04:03:52,553 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/default/TestHBaseWalOnEC/363b9af670907cd383cac5c3431a120a 2024-11-11T04:03:52,558 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 363b9af670907cd383cac5c3431a120a 2024-11-11T04:03:52,582 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/default/TestHBaseWalOnEC/363b9af670907cd383cac5c3431a120a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:03:52,583 INFO [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 363b9af670907cd383cac5c3431a120a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66043504, jitterRate=-0.01587510108947754}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T04:03:52,584 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 363b9af670907cd383cac5c3431a120a: 2024-11-11T04:03:52,586 INFO [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a., pid=11, masterSystemTime=1731297832528 2024-11-11T04:03:52,590 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 2024-11-11T04:03:52,590 INFO [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 
2024-11-11T04:03:52,594 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=363b9af670907cd383cac5c3431a120a, regionState=OPEN, openSeqNum=2, regionLocation=388b5ced38b8,33817,1731297828174 2024-11-11T04:03:52,606 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-11T04:03:52,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 363b9af670907cd383cac5c3431a120a, server=388b5ced38b8,33817,1731297828174 in 224 msec 2024-11-11T04:03:52,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-11T04:03:52,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=363b9af670907cd383cac5c3431a120a, ASSIGN in 394 msec 2024-11-11T04:03:52,613 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T04:03:52,613 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731297832613"}]},"ts":"1731297832613"} 2024-11-11T04:03:52,617 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-11T04:03:52,659 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T04:03:52,665 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestHBaseWalOnEC in 807 msec 2024-11-11T04:03:53,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T04:03:53,002 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestHBaseWalOnEC, procId: 9 completed 2024-11-11T04:03:53,002 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-11T04:03:53,003 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T04:03:53,010 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-11T04:03:53,011 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T04:03:53,011 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table TestHBaseWalOnEC assigned. 
2024-11-11T04:03:53,018 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:03:53,021 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45008, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:03:53,046 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-11T04:03:53,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC 2024-11-11T04:03:53,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T04:03:53,060 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-11T04:03:53,064 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T04:03:53,066 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T04:03:53,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T04:03:53,236 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 388b5ced38b8,33817,1731297828174 2024-11-11T04:03:53,238 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33817 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-11T04:03:53,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 2024-11-11T04:03:53,242 INFO [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 363b9af670907cd383cac5c3431a120a 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-11T04:03:53,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/default/TestHBaseWalOnEC/363b9af670907cd383cac5c3431a120a/.tmp/cf/2c359cb9bb754272aae6dbc809a73fbb is 36, key is row/cf:cq/1731297833022/Put/seqid=0 2024-11-11T04:03:53,327 WARN [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. 
You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:53,327 WARN [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:53,340 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1787683704_22 at /127.0.0.1:48202 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775616_1028] {}] datanode.DataXceiver(331): 127.0.0.1:33265:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48202 dst: /127.0.0.1:33265 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:03:53,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775616_1029 (size=4787) 2024-11-11T04:03:53,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T04:03:53,366 WARN [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-11T04:03:53,366 INFO [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/default/TestHBaseWalOnEC/363b9af670907cd383cac5c3431a120a/.tmp/cf/2c359cb9bb754272aae6dbc809a73fbb 2024-11-11T04:03:53,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/default/TestHBaseWalOnEC/363b9af670907cd383cac5c3431a120a/.tmp/cf/2c359cb9bb754272aae6dbc809a73fbb as hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/default/TestHBaseWalOnEC/363b9af670907cd383cac5c3431a120a/cf/2c359cb9bb754272aae6dbc809a73fbb 2024-11-11T04:03:53,466 INFO [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/default/TestHBaseWalOnEC/363b9af670907cd383cac5c3431a120a/cf/2c359cb9bb754272aae6dbc809a73fbb, entries=1, sequenceid=5, filesize=4.7 K 2024-11-11T04:03:53,472 INFO [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 363b9af670907cd383cac5c3431a120a in 231ms, sequenceid=5, compaction requested=false 2024-11-11T04:03:53,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestHBaseWalOnEC' 2024-11-11T04:03:53,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 363b9af670907cd383cac5c3431a120a: 2024-11-11T04:03:53,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 
2024-11-11T04:03:53,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-11T04:03:53,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-11T04:03:53,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-11T04:03:53,493 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 422 msec 2024-11-11T04:03:53,500 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC in 448 msec 2024-11-11T04:03:53,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44323 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T04:03:53,667 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC, procId: 12 completed 2024-11-11T04:03:53,682 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-11T04:03:53,682 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-11T04:03:53,683 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2bca0ec7 to 127.0.0.1:57522 2024-11-11T04:03:53,683 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:03:53,684 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T04:03:53,684 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=564909863, stopped=false 2024-11-11T04:03:53,684 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=388b5ced38b8,44323,1731297827171 2024-11-11T04:03:53,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:53,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:53,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:53,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:53,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:53,733 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:53,733 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-11T04:03:53,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:53,734 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:03:53,734 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '388b5ced38b8,43881,1731297828046' ***** 2024-11-11T04:03:53,734 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-11T04:03:53,734 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:53,735 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:03:53,735 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:53,735 INFO [RS:0;388b5ced38b8:43881 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:03:53,735 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:53,735 INFO [RS:0;388b5ced38b8:43881 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-11T04:03:53,735 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:53,735 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-11T04:03:53,735 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(3579): Received CLOSE for bd9f4f7b689140ea375446d6966edfe1 2024-11-11T04:03:53,736 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1224): stopping server 388b5ced38b8,43881,1731297828046 2024-11-11T04:03:53,736 DEBUG [RS:0;388b5ced38b8:43881 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:03:53,736 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '388b5ced38b8,44251,1731297828114' ***** 2024-11-11T04:03:53,736 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-11T04:03:53,736 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-11T04:03:53,736 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '388b5ced38b8,33817,1731297828174' ***** 2024-11-11T04:03:53,736 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-11T04:03:53,736 DEBUG [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1603): Online Regions={bd9f4f7b689140ea375446d6966edfe1=hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.} 2024-11-11T04:03:53,736 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:03:53,736 INFO [RS:1;388b5ced38b8:44251 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:03:53,736 INFO [RS:1;388b5ced38b8:44251 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:03:53,737 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1224): stopping server 388b5ced38b8,44251,1731297828114 2024-11-11T04:03:53,737 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing bd9f4f7b689140ea375446d6966edfe1, disabling compactions & flushes 2024-11-11T04:03:53,737 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1. 2024-11-11T04:03:53,737 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-11T04:03:53,737 DEBUG [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1629): Waiting on bd9f4f7b689140ea375446d6966edfe1 2024-11-11T04:03:53,737 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1. 2024-11-11T04:03:53,737 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:53,737 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1. 
after waiting 0 ms 2024-11-11T04:03:53,737 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:03:53,737 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1. 2024-11-11T04:03:53,737 INFO [RS:2;388b5ced38b8:33817 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:03:53,737 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-11T04:03:53,737 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing bd9f4f7b689140ea375446d6966edfe1 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-11T04:03:53,737 INFO [RS:2;388b5ced38b8:33817 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:03:53,737 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(3579): Received CLOSE for 363b9af670907cd383cac5c3431a120a 2024-11-11T04:03:53,737 DEBUG [RS:1;388b5ced38b8:44251 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:03:53,738 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T04:03:53,738 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T04:03:53,738 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T04:03:53,738 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1224): stopping server 388b5ced38b8,33817,1731297828174 2024-11-11T04:03:53,738 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-11T04:03:53,738 DEBUG [RS:2;388b5ced38b8:33817 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:03:53,738 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-11T04:03:53,738 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 363b9af670907cd383cac5c3431a120a, disabling compactions & flushes 2024-11-11T04:03:53,738 DEBUG [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1603): Online Regions={363b9af670907cd383cac5c3431a120a=TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a.} 2024-11-11T04:03:53,738 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 2024-11-11T04:03:53,738 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 2024-11-11T04:03:53,738 DEBUG [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1629): Waiting on 363b9af670907cd383cac5c3431a120a 2024-11-11T04:03:53,738 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 
after waiting 0 ms 2024-11-11T04:03:53,738 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 2024-11-11T04:03:53,741 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-11T04:03:53,741 DEBUG [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T04:03:53,741 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:03:53,741 DEBUG [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-11T04:03:53,741 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-11T04:03:53,741 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-11T04:03:53,741 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:03:53,742 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:03:53,742 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.51 KB heapSize=5.02 KB 2024-11-11T04:03:53,759 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/default/TestHBaseWalOnEC/363b9af670907cd383cac5c3431a120a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-11T04:03:53,766 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 2024-11-11T04:03:53,766 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 363b9af670907cd383cac5c3431a120a: 2024-11-11T04:03:53,766 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a. 2024-11-11T04:03:53,770 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/namespace/bd9f4f7b689140ea375446d6966edfe1/.tmp/info/5a03d365f63548aeb19fe90c0a990b74 is 45, key is default/info:d/1731297831539/Put/seqid=0 2024-11-11T04:03:53,773 WARN [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 
2024-11-11T04:03:53,773 WARN [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:53,780 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/.tmp/info/4f8abf710b084bd68b74096efa420b14 is 153, key is TestHBaseWalOnEC,,1731297831842.363b9af670907cd383cac5c3431a120a./info:regioninfo/1731297832594/Put/seqid=0 2024-11-11T04:03:53,783 WARN [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:53,783 WARN [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'. 2024-11-11T04:03:53,787 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_611189537_22 at /127.0.0.1:49306 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775600_1030] {}] datanode.DataXceiver(331): 127.0.0.1:42877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49306 dst: /127.0.0.1:42877 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T04:03:53,790 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1455380182_22 at /127.0.0.1:47384 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775584_1031] {}] datanode.DataXceiver(331): 127.0.0.1:37915:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:47384 dst: /127.0.0.1:37915 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T04:03:53,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775584_1033 (size=7835) 2024-11-11T04:03:53,799 WARN [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 2024-11-11T04:03:53,800 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.32 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/.tmp/info/4f8abf710b084bd68b74096efa420b14 2024-11-11T04:03:53,801 INFO [regionserver/388b5ced38b8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:03:53,801 INFO [regionserver/388b5ced38b8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:03:53,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775600_1032 (size=5037) 2024-11-11T04:03:53,802 INFO [regionserver/388b5ced38b8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:03:53,804 WARN [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data. 
2024-11-11T04:03:53,805 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/namespace/bd9f4f7b689140ea375446d6966edfe1/.tmp/info/5a03d365f63548aeb19fe90c0a990b74
2024-11-11T04:03:53,828 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/namespace/bd9f4f7b689140ea375446d6966edfe1/.tmp/info/5a03d365f63548aeb19fe90c0a990b74 as hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/namespace/bd9f4f7b689140ea375446d6966edfe1/info/5a03d365f63548aeb19fe90c0a990b74
2024-11-11T04:03:53,840 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/.tmp/table/62403cc586024de2aef98e449d3b824f is 52, key is TestHBaseWalOnEC/table:state/1731297832613/Put/seqid=0
2024-11-11T04:03:53,843 WARN [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T04:03:53,843 WARN [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T04:03:53,851 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/namespace/bd9f4f7b689140ea375446d6966edfe1/info/5a03d365f63548aeb19fe90c0a990b74, entries=2, sequenceid=6, filesize=4.9 K
2024-11-11T04:03:53,853 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for bd9f4f7b689140ea375446d6966edfe1 in 116ms, sequenceid=6, compaction requested=false
2024-11-11T04:03:53,853 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace'
2024-11-11T04:03:53,860 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1455380182_22 at /127.0.0.1:48236 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775568_1034] {}] datanode.DataXceiver(331): 127.0.0.1:33265:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48236 dst: /127.0.0.1:33265
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T04:03:53,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775568_1035 (size=5347)
2024-11-11T04:03:53,882 WARN [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-11T04:03:53,882 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/.tmp/table/62403cc586024de2aef98e449d3b824f
2024-11-11T04:03:53,903 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/.tmp/info/4f8abf710b084bd68b74096efa420b14 as hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/info/4f8abf710b084bd68b74096efa420b14
2024-11-11T04:03:53,903 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/namespace/bd9f4f7b689140ea375446d6966edfe1/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-11-11T04:03:53,907 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.
2024-11-11T04:03:53,907 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for bd9f4f7b689140ea375446d6966edfe1:
2024-11-11T04:03:53,907 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1731297830458.bd9f4f7b689140ea375446d6966edfe1.
2024-11-11T04:03:53,931 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/info/4f8abf710b084bd68b74096efa420b14, entries=20, sequenceid=14, filesize=7.7 K
2024-11-11T04:03:53,934 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/.tmp/table/62403cc586024de2aef98e449d3b824f as hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/table/62403cc586024de2aef98e449d3b824f
2024-11-11T04:03:53,937 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1250): stopping server 388b5ced38b8,43881,1731297828046; all regions closed.
2024-11-11T04:03:53,938 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1250): stopping server 388b5ced38b8,33817,1731297828174; all regions closed.
2024-11-11T04:03:53,942 DEBUG [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1629): Waiting on 1588230740
2024-11-11T04:03:53,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_1073741826_1016 (size=1298)
2024-11-11T04:03:53,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_1073741826_1016 (size=1298)
2024-11-11T04:03:53,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741826_1016 (size=1298)
2024-11-11T04:03:53,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_1073741828_1018 (size=1414)
2024-11-11T04:03:53,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_1073741828_1018 (size=1414)
2024-11-11T04:03:53,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741828_1018 (size=1414)
2024-11-11T04:03:53,956 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/table/62403cc586024de2aef98e449d3b824f, entries=4, sequenceid=14, filesize=5.2 K
2024-11-11T04:03:53,958 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.51 KB/2567, heapSize ~4.74 KB/4856, currentSize=0 B/0 for 1588230740 in 215ms, sequenceid=14, compaction requested=false
2024-11-11T04:03:53,961 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-11T04:03:53,963 DEBUG [RS:2;388b5ced38b8:33817 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/oldWALs
2024-11-11T04:03:53,963 INFO [RS:2;388b5ced38b8:33817 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 388b5ced38b8%2C33817%2C1731297828174:(num 1731297829758)
2024-11-11T04:03:53,963 DEBUG [RS:2;388b5ced38b8:33817 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T04:03:53,963 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.LeaseManager(133): Closed leases
2024-11-11T04:03:53,963 DEBUG [RS:0;388b5ced38b8:43881 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/oldWALs
2024-11-11T04:03:53,963 INFO [RS:0;388b5ced38b8:43881 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 388b5ced38b8%2C43881%2C1731297828046:(num 1731297829762)
2024-11-11T04:03:53,963 DEBUG [RS:0;388b5ced38b8:43881 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T04:03:53,963 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.LeaseManager(133): Closed leases
2024-11-11T04:03:53,964 INFO [RS:2;388b5ced38b8:33817 {}] hbase.ChoreService(370): Chore service for: regionserver/388b5ced38b8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-11-11T04:03:53,964 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-11T04:03:53,964 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-11T04:03:53,964 INFO [RS:0;388b5ced38b8:43881 {}] hbase.ChoreService(370): Chore service for: regionserver/388b5ced38b8:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-11T04:03:53,964 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-11T04:03:53,964 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-11T04:03:53,964 INFO [regionserver/388b5ced38b8:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-11T04:03:53,964 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-11T04:03:53,964 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-11T04:03:53,964 INFO [RS:0;388b5ced38b8:43881 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:43881
2024-11-11T04:03:53,964 INFO [RS:2;388b5ced38b8:33817 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33817
2024-11-11T04:03:53,966 INFO [regionserver/388b5ced38b8:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-11T04:03:53,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/388b5ced38b8,33817,1731297828174
2024-11-11T04:03:53,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-11T04:03:53,982 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/388b5ced38b8,43881,1731297828046
2024-11-11T04:03:53,982 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [388b5ced38b8,43881,1731297828046]
2024-11-11T04:03:53,982 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 388b5ced38b8,43881,1731297828046; numProcessing=1
2024-11-11T04:03:53,992 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1
2024-11-11T04:03:53,994 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-11T04:03:53,994 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-11-11T04:03:53,995 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-11-11T04:03:53,995 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-11T04:03:53,999 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/388b5ced38b8,43881,1731297828046 already deleted, retry=false
2024-11-11T04:03:53,999 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 388b5ced38b8,43881,1731297828046 expired; onlineServers=2
2024-11-11T04:03:53,999 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [388b5ced38b8,33817,1731297828174]
2024-11-11T04:03:53,999 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 388b5ced38b8,33817,1731297828174; numProcessing=2
2024-11-11T04:03:54,007 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/388b5ced38b8,33817,1731297828174 already deleted, retry=false
2024-11-11T04:03:54,007 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 388b5ced38b8,33817,1731297828174 expired; onlineServers=1
2024-11-11T04:03:54,090 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:03:54,091 INFO [RS:2;388b5ced38b8:33817 {}] regionserver.HRegionServer(1307): Exiting; stopping=388b5ced38b8,33817,1731297828174; zookeeper connection closed.
2024-11-11T04:03:54,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33817-0x1012959f8750003, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:03:54,091 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1902e522 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1902e522
2024-11-11T04:03:54,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:03:54,100 INFO [RS:0;388b5ced38b8:43881 {}] regionserver.HRegionServer(1307): Exiting; stopping=388b5ced38b8,43881,1731297828046; zookeeper connection closed.
2024-11-11T04:03:54,100 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43881-0x1012959f8750001, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:03:54,101 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2d02c237 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2d02c237
2024-11-11T04:03:54,142 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1250): stopping server 388b5ced38b8,44251,1731297828114; all regions closed.
2024-11-11T04:03:54,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741829_1019 (size=4015)
2024-11-11T04:03:54,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_1073741829_1019 (size=4015)
2024-11-11T04:03:54,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_1073741829_1019 (size=4015)
2024-11-11T04:03:54,155 DEBUG [RS:1;388b5ced38b8:44251 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/oldWALs
2024-11-11T04:03:54,155 INFO [RS:1;388b5ced38b8:44251 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 388b5ced38b8%2C44251%2C1731297828114.meta:.meta(num 1731297830175)
2024-11-11T04:03:54,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_1073741827_1017 (size=93)
2024-11-11T04:03:54,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_1073741827_1017 (size=93)
2024-11-11T04:03:54,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741827_1017 (size=93)
2024-11-11T04:03:54,166 DEBUG [RS:1;388b5ced38b8:44251 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/oldWALs
2024-11-11T04:03:54,166 INFO [RS:1;388b5ced38b8:44251 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 388b5ced38b8%2C44251%2C1731297828114:(num 1731297829758)
2024-11-11T04:03:54,166 DEBUG [RS:1;388b5ced38b8:44251 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T04:03:54,166 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.LeaseManager(133): Closed leases
2024-11-11T04:03:54,167 INFO [RS:1;388b5ced38b8:44251 {}] hbase.ChoreService(370): Chore service for: regionserver/388b5ced38b8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-11-11T04:03:54,167 INFO [RS:1;388b5ced38b8:44251 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44251
2024-11-11T04:03:54,168 INFO [regionserver/388b5ced38b8:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-11T04:03:54,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-11T04:03:54,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/388b5ced38b8,44251,1731297828114
2024-11-11T04:03:54,174 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [388b5ced38b8,44251,1731297828114]
2024-11-11T04:03:54,174 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 388b5ced38b8,44251,1731297828114; numProcessing=3
2024-11-11T04:03:54,211 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/388b5ced38b8,44251,1731297828114 already deleted, retry=false
2024-11-11T04:03:54,211 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 388b5ced38b8,44251,1731297828114 expired; onlineServers=0
2024-11-11T04:03:54,211 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '388b5ced38b8,44323,1731297827171' *****
2024-11-11T04:03:54,211 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-11T04:03:54,211 DEBUG [M:0;388b5ced38b8:44323 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d123a1d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=388b5ced38b8/172.17.0.2:0
2024-11-11T04:03:54,211 INFO [M:0;388b5ced38b8:44323 {}] regionserver.HRegionServer(1224): stopping server 388b5ced38b8,44323,1731297827171
2024-11-11T04:03:54,212 INFO [M:0;388b5ced38b8:44323 {}] regionserver.HRegionServer(1250): stopping server 388b5ced38b8,44323,1731297827171; all regions closed.
2024-11-11T04:03:54,212 DEBUG [M:0;388b5ced38b8:44323 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T04:03:54,212 DEBUG [M:0;388b5ced38b8:44323 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-11T04:03:54,212 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-11T04:03:54,212 DEBUG [master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.large.0-1731297829403 {}] cleaner.HFileCleaner(306): Exit Thread[master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.large.0-1731297829403,5,FailOnTimeoutGroup]
2024-11-11T04:03:54,212 DEBUG [master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.small.0-1731297829404 {}] cleaner.HFileCleaner(306): Exit Thread[master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.small.0-1731297829404,5,FailOnTimeoutGroup]
2024-11-11T04:03:54,212 DEBUG [M:0;388b5ced38b8:44323 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-11T04:03:54,213 INFO [M:0;388b5ced38b8:44323 {}] hbase.ChoreService(370): Chore service for: master/388b5ced38b8:0 had [] on shutdown
2024-11-11T04:03:54,213 DEBUG [M:0;388b5ced38b8:44323 {}] master.HMaster(1733): Stopping service threads
2024-11-11T04:03:54,213 INFO [M:0;388b5ced38b8:44323 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-11T04:03:54,214 INFO [M:0;388b5ced38b8:44323 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-11T04:03:54,214 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-11T04:03:54,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-11T04:03:54,224 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T04:03:54,232 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-11T04:03:54,232 DEBUG [M:0;388b5ced38b8:44323 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/master already deleted, retry=false
2024-11-11T04:03:54,232 DEBUG [M:0;388b5ced38b8:44323 {}] master.ActiveMasterManager(353): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master
2024-11-11T04:03:54,232 INFO [M:0;388b5ced38b8:44323 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-11-11T04:03:54,232 INFO [M:0;388b5ced38b8:44323 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-11T04:03:54,233 DEBUG [M:0;388b5ced38b8:44323 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-11T04:03:54,233 INFO [M:0;388b5ced38b8:44323 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T04:03:54,233 DEBUG [M:0;388b5ced38b8:44323 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T04:03:54,233 DEBUG [M:0;388b5ced38b8:44323 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-11T04:03:54,233 DEBUG [M:0;388b5ced38b8:44323 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T04:03:54,233 INFO [M:0;388b5ced38b8:44323 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.80 KB heapSize=54.27 KB
2024-11-11T04:03:54,261 DEBUG [M:0;388b5ced38b8:44323 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cee41772c547475a81bb61d078651f69 is 82, key is hbase:meta,,1/info:regioninfo/1731297830293/Put/seqid=0
2024-11-11T04:03:54,264 WARN [M:0;388b5ced38b8:44323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T04:03:54,264 WARN [M:0;388b5ced38b8:44323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T04:03:54,275 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:48256 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775552_1036] {}] datanode.DataXceiver(331): 127.0.0.1:33265:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48256 dst: /127.0.0.1:33265
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T04:03:54,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:03:54,282 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44251-0x1012959f8750002, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:03:54,290 INFO [RS:1;388b5ced38b8:44251 {}] regionserver.HRegionServer(1307): Exiting; stopping=388b5ced38b8,44251,1731297828114; zookeeper connection closed.
2024-11-11T04:03:54,294 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5036f075 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5036f075
2024-11-11T04:03:54,295 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-11T04:03:54,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775552_1037 (size=5672)
2024-11-11T04:03:54,698 WARN [M:0;388b5ced38b8:44323 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-11T04:03:54,698 INFO [M:0;388b5ced38b8:44323 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cee41772c547475a81bb61d078651f69
2024-11-11T04:03:54,741 DEBUG [M:0;388b5ced38b8:44323 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/02df207f011343bc8c900b6ef9e48062 is 748, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1731297832663/Put/seqid=0
2024-11-11T04:03:54,743 WARN [M:0;388b5ced38b8:44323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T04:03:54,743 WARN [M:0;388b5ced38b8:44323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T04:03:54,750 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:48266 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775536_1038] {}] datanode.DataXceiver(331): 127.0.0.1:33265:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48266 dst: /127.0.0.1:33265
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T04:03:54,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775536_1039 (size=7798)
2024-11-11T04:03:54,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775677_1021 (size=358)
2024-11-11T04:03:54,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775676_1021 (size=358)
2024-11-11T04:03:54,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_-9223372036854775661_1023 (size=42)
2024-11-11T04:03:54,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775660_1023 (size=42)
2024-11-11T04:03:54,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775628_1027 (size=51)
2024-11-11T04:03:54,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775629_1027 (size=51)
2024-11-11T04:03:54,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_-9223372036854775645_1025 (size=392)
2024-11-11T04:03:54,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775644_1025 (size=392)
2024-11-11T04:03:55,161 WARN [M:0;388b5ced38b8:44323 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-11T04:03:55,161 INFO [M:0;388b5ced38b8:44323 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.12 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/02df207f011343bc8c900b6ef9e48062
2024-11-11T04:03:55,191 DEBUG [M:0;388b5ced38b8:44323 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87d1315c2dbf46bc907006bb20a6c6c4 is 69, key is 388b5ced38b8,33817,1731297828174/rs:state/1731297829478/Put/seqid=0
2024-11-11T04:03:55,193 WARN [M:0;388b5ced38b8:44323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=3, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T04:03:55,194 WARN [M:0;388b5ced38b8:44323 {}] hdfs.DFSStripedOutputStream(531): Cannot allocate parity block(index=4, policy=RS-3-2-1024k). Exclude nodes=[]. There may not be enough datanodes or racks. You can check if the cluster topology supports the enabled erasure coding policies by running the command 'hdfs ec -verifyClusterSetup'.
2024-11-11T04:03:55,207 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_180731555_22 at /127.0.0.1:49360 [Receiving block BP-234455755-172.17.0.2-1731297822701:blk_-9223372036854775520_1040] {}] datanode.DataXceiver(331): 127.0.0.1:42877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:49360 dst: /127.0.0.1:42877
java.io.IOException: Premature EOF from inputStream
    at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T04:03:55,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_-9223372036854775520_1041 (size=5294)
2024-11-11T04:03:55,217 WARN [M:0;388b5ced38b8:44323 {}] hdfs.DFSStripedOutputStream(1367): Block group <1> failed to write 2 blocks. It's at high risk of losing data.
2024-11-11T04:03:55,218 INFO [M:0;388b5ced38b8:44323 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87d1315c2dbf46bc907006bb20a6c6c4
2024-11-11T04:03:55,229 DEBUG [M:0;388b5ced38b8:44323 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cee41772c547475a81bb61d078651f69 as hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cee41772c547475a81bb61d078651f69
2024-11-11T04:03:55,239 INFO [M:0;388b5ced38b8:44323 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cee41772c547475a81bb61d078651f69, entries=8, sequenceid=111, filesize=5.5 K
2024-11-11T04:03:55,242 DEBUG [M:0;388b5ced38b8:44323 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/02df207f011343bc8c900b6ef9e48062 as hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/02df207f011343bc8c900b6ef9e48062
2024-11-11T04:03:55,252 INFO [M:0;388b5ced38b8:44323 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/02df207f011343bc8c900b6ef9e48062, entries=13, sequenceid=111, filesize=7.6 K
2024-11-11T04:03:55,255 DEBUG [M:0;388b5ced38b8:44323 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/87d1315c2dbf46bc907006bb20a6c6c4 as hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/87d1315c2dbf46bc907006bb20a6c6c4
2024-11-11T04:03:55,272 INFO [M:0;388b5ced38b8:44323 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/87d1315c2dbf46bc907006bb20a6c6c4, entries=3, sequenceid=111, filesize=5.2 K
2024-11-11T04:03:55,273 INFO [M:0;388b5ced38b8:44323 {}] regionserver.HRegion(3040): Finished flush of dataSize ~43.80 KB/44851, heapSize ~53.97 KB/55264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1040ms, sequenceid=111, compaction requested=false
2024-11-11T04:03:55,275 INFO [M:0;388b5ced38b8:44323 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T04:03:55,275 DEBUG [M:0;388b5ced38b8:44323 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-11T04:03:55,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33265 is added to blk_1073741825_1011 (size=52566)
2024-11-11T04:03:55,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37915 is added to blk_1073741825_1011 (size=52566)
2024-11-11T04:03:55,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42877 is added to blk_1073741825_1011 (size=52566)
2024-11-11T04:03:55,287 INFO [M:0;388b5ced38b8:44323 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-11T04:03:55,287 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-11T04:03:55,287 INFO [M:0;388b5ced38b8:44323 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:44323
2024-11-11T04:03:55,323 DEBUG [M:0;388b5ced38b8:44323 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/388b5ced38b8,44323,1731297827171 already deleted, retry=false
2024-11-11T04:03:55,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:03:55,432 INFO [M:0;388b5ced38b8:44323 {}] regionserver.HRegionServer(1307): Exiting; stopping=388b5ced38b8,44323,1731297827171; zookeeper connection closed.
2024-11-11T04:03:55,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44323-0x1012959f8750000, quorum=127.0.0.1:57522, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:03:55,440 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@216c38ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T04:03:55,442 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@27b3056c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:03:55,442 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:03:55,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6c51149d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:03:55,443 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66733fff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.log.dir/,STOPPED}
2024-11-11T04:03:55,445 WARN [BP-234455755-172.17.0.2-1731297822701 heartbeating to localhost/127.0.0.1:32935 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T04:03:55,445 WARN [BP-234455755-172.17.0.2-1731297822701 heartbeating to localhost/127.0.0.1:32935 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-234455755-172.17.0.2-1731297822701 (Datanode Uuid 5f8d56fd-3e46-4296-966b-1fa4aaed217f) service to localhost/127.0.0.1:32935
2024-11-11T04:03:55,446 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T04:03:55,447 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T04:03:55,447 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data5/current/BP-234455755-172.17.0.2-1731297822701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:03:55,447 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data6/current/BP-234455755-172.17.0.2-1731297822701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:03:55,447 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T04:03:55,450 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3af8c6aa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T04:03:55,450 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5a19409c{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:03:55,450 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:03:55,450 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@31a55d25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:03:55,451 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3c2481f4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.log.dir/,STOPPED}
2024-11-11T04:03:55,451 WARN [BP-234455755-172.17.0.2-1731297822701 heartbeating to localhost/127.0.0.1:32935 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T04:03:55,451 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T04:03:55,452 WARN [BP-234455755-172.17.0.2-1731297822701 heartbeating to localhost/127.0.0.1:32935 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-234455755-172.17.0.2-1731297822701 (Datanode Uuid 684fad58-a049-4a16-9d3a-17748bc29807) service to localhost/127.0.0.1:32935
2024-11-11T04:03:55,452 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T04:03:55,452 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data3/current/BP-234455755-172.17.0.2-1731297822701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:03:55,452 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data4/current/BP-234455755-172.17.0.2-1731297822701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:03:55,453 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T04:03:55,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@61a09aaa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T04:03:55,457 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@487ce309{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:03:55,457 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:03:55,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@72bec311{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:03:55,457 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@657ab07f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.log.dir/,STOPPED}
2024-11-11T04:03:55,465 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T04:03:55,465 WARN [BP-234455755-172.17.0.2-1731297822701 heartbeating to localhost/127.0.0.1:32935 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T04:03:55,465 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T04:03:55,465 WARN [BP-234455755-172.17.0.2-1731297822701 heartbeating to localhost/127.0.0.1:32935 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-234455755-172.17.0.2-1731297822701 (Datanode Uuid 858a6258-bf87-44de-82fa-608282b3f8e9) service to localhost/127.0.0.1:32935
2024-11-11T04:03:55,466 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data1/current/BP-234455755-172.17.0.2-1731297822701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:03:55,466 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/cluster_164ab392-14a9-c9c4-69e2-960a25ea1fe1/dfs/data/data2/current/BP-234455755-172.17.0.2-1731297822701 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:03:55,466 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T04:03:55,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3f706372{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T04:03:55,478 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2c67d4dc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:03:55,478 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:03:55,479 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a436cce{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:03:55,479 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c3a779d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.log.dir/,STOPPED}
2024-11-11T04:03:55,488 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-11T04:03:55,530 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-11-11T04:03:55,538 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[0] Thread=85 (was 155), OpenFileDescriptor=450 (was 390) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=497 (was 479) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5168 (was 5685)
2024-11-11T04:03:55,544 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=85, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=497, ProcessCount=11, AvailableMemoryMB=5168
2024-11-11T04:03:55,545 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-11T04:03:55,545 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.log.dir so I do NOT create it in target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0
2024-11-11T04:03:55,545 INFO [Time-limited test {}] hbase.HBaseTestingUtility(451): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/faec6644-a59e-aa04-dce6-72f651687257/hadoop.tmp.dir so I do NOT create it in target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0
2024-11-11T04:03:55,545 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2, deleteOnExit=true
2024-11-11T04:03:55,545 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS
2024-11-11T04:03:55,545 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/test.cache.data in system properties and HBase conf
2024-11-11T04:03:55,545 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/hadoop.tmp.dir in system properties and HBase conf
2024-11-11T04:03:55,545 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/hadoop.log.dir in system properties and HBase conf
2024-11-11T04:03:55,545 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-11T04:03:55,545 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-11T04:03:55,545 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF
2024-11-11T04:03:55,546 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering
2024-11-11T04:03:55,546 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf
2024-11-11T04:03:55,546 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf
2024-11-11T04:03:55,546 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/yarn.nodemanager.log-dirs in system properties and HBase conf
2024-11-11T04:03:55,546 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-11T04:03:55,546 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf
2024-11-11T04:03:55,546 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf
2024-11-11T04:03:55,546 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf
2024-11-11T04:03:55,546 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-11T04:03:55,547 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf
2024-11-11T04:03:55,547 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/nfs.dump.dir in system properties and HBase conf
2024-11-11T04:03:55,547 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/java.io.tmpdir in system properties and HBase conf
2024-11-11T04:03:55,547 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/dfs.journalnode.edits.dir in system properties and HBase conf
2024-11-11T04:03:55,547 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf
2024-11-11T04:03:55,547 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf
2024-11-11T04:03:55,769 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-11T04:03:55,778 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T04:03:55,825 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T04:03:55,826 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-11T04:03:55,854 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T04:03:55,863 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T04:03:55,873 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T04:03:55,873 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T04:03:55,873 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-11T04:03:55,874 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T04:03:55,874 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@57f6d17f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/hadoop.log.dir/,AVAILABLE}
2024-11-11T04:03:55,875 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@953b9cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T04:03:55,979 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b189467{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/java.io.tmpdir/jetty-localhost-38977-hadoop-hdfs-3_4_1-tests_jar-_-any-9636532678981899262/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T04:03:55,980 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@41822124{HTTP/1.1, (http/1.1)}{localhost:38977}
2024-11-11T04:03:55,980 INFO [Time-limited test {}] server.Server(415): Started @16602ms
2024-11-11T04:03:56,178 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret
2024-11-11T04:03:56,182 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9
2024-11-11T04:03:56,183 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0
2024-11-11T04:03:56,183 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults
2024-11-11T04:03:56,183 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms
2024-11-11T04:03:56,184 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@464ee984{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/hadoop.log.dir/,AVAILABLE}
2024-11-11T04:03:56,185 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59792bf6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE}
2024-11-11T04:03:56,292 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@71fc4ad0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/java.io.tmpdir/jetty-localhost-46429-hadoop-hdfs-3_4_1-tests_jar-_-any-1921761389511040090/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T04:03:56,293 INFO
[Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b3638bf{HTTP/1.1, (http/1.1)}{localhost:46429} 2024-11-11T04:03:56,293 INFO [Time-limited test {}] server.Server(415): Started @16915ms 2024-11-11T04:03:56,295 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:03:56,337 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:03:56,342 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:03:56,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:03:56,346 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:03:56,346 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:03:56,348 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bb43762{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:03:56,349 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2bdb6514{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:03:56,445 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@35ecfb16{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/java.io.tmpdir/jetty-localhost-35319-hadoop-hdfs-3_4_1-tests_jar-_-any-7303678368681616532/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:03:56,445 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5ad4b412{HTTP/1.1, (http/1.1)}{localhost:35319} 2024-11-11T04:03:56,445 INFO [Time-limited test {}] server.Server(415): Started @17067ms 2024-11-11T04:03:56,448 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:03:56,488 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T04:03:56,491 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T04:03:56,492 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T04:03:56,492 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T04:03:56,492 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T04:03:56,493 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37ab8214{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/hadoop.log.dir/,AVAILABLE} 2024-11-11T04:03:56,493 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1146e519{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T04:03:56,589 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4d486b54{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/java.io.tmpdir/jetty-localhost-38719-hadoop-hdfs-3_4_1-tests_jar-_-any-9021617336655409554/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T04:03:56,589 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7e2e33b1{HTTP/1.1, (http/1.1)}{localhost:38719} 2024-11-11T04:03:56,590 INFO [Time-limited test {}] server.Server(415): Started @17211ms 2024-11-11T04:03:56,594 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T04:03:56,966 WARN [Thread-601 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data1/current/BP-2067460977-172.17.0.2-1731297835580/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:56,966 WARN [Thread-602 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data2/current/BP-2067460977-172.17.0.2-1731297835580/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:56,982 WARN [Thread-544 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:03:56,985 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9018bd582b8d097f with lease ID 0x50104c629c962378: Processing first storage report for DS-18eda0e7-a512-490f-85b4-01524c2919a1 from datanode DatanodeRegistration(127.0.0.1:45571, datanodeUuid=f060a0a8-a37a-43b2-b990-b632b26e6aff, infoPort=43023, infoSecurePort=0, ipcPort=46565, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580) 2024-11-11T04:03:56,985 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9018bd582b8d097f with lease ID 0x50104c629c962378: from storage DS-18eda0e7-a512-490f-85b4-01524c2919a1 node DatanodeRegistration(127.0.0.1:45571, datanodeUuid=f060a0a8-a37a-43b2-b990-b632b26e6aff, infoPort=43023, infoSecurePort=0, ipcPort=46565, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:03:56,985 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9018bd582b8d097f with lease ID 0x50104c629c962378: Processing first storage report for DS-e277ebb6-f593-4037-8b7f-fdb290387e42 from datanode DatanodeRegistration(127.0.0.1:45571, datanodeUuid=f060a0a8-a37a-43b2-b990-b632b26e6aff, infoPort=43023, infoSecurePort=0, ipcPort=46565, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580) 2024-11-11T04:03:56,985 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9018bd582b8d097f with lease ID 0x50104c629c962378: from storage DS-e277ebb6-f593-4037-8b7f-fdb290387e42 node DatanodeRegistration(127.0.0.1:45571, datanodeUuid=f060a0a8-a37a-43b2-b990-b632b26e6aff, infoPort=43023, infoSecurePort=0, ipcPort=46565, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:03:57,570 WARN [Thread-616 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data4/current/BP-2067460977-172.17.0.2-1731297835580/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:57,570 WARN [Thread-615 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data3/current/BP-2067460977-172.17.0.2-1731297835580/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:57,588 WARN [Thread-567 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:03:57,591 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd82b1e45eaf6fb29 with lease ID 0x50104c629c962379: Processing first storage report for DS-3ae9723b-629f-47ee-8bce-dca904445da9 from datanode DatanodeRegistration(127.0.0.1:44705, datanodeUuid=662e5429-60ae-4511-baf9-0321b1fbea7c, infoPort=45093, infoSecurePort=0, ipcPort=43145, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580) 2024-11-11T04:03:57,591 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd82b1e45eaf6fb29 with lease ID 0x50104c629c962379: from storage DS-3ae9723b-629f-47ee-8bce-dca904445da9 node DatanodeRegistration(127.0.0.1:44705, datanodeUuid=662e5429-60ae-4511-baf9-0321b1fbea7c, infoPort=45093, infoSecurePort=0, ipcPort=43145, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-11T04:03:57,592 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd82b1e45eaf6fb29 with lease ID 0x50104c629c962379: Processing first storage report for DS-3a5d950b-a524-4447-95a7-6b34a24f9e0d from datanode DatanodeRegistration(127.0.0.1:44705, datanodeUuid=662e5429-60ae-4511-baf9-0321b1fbea7c, infoPort=45093, infoSecurePort=0, ipcPort=43145, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580) 2024-11-11T04:03:57,592 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd82b1e45eaf6fb29 with lease ID 0x50104c629c962379: from storage DS-3a5d950b-a524-4447-95a7-6b34a24f9e0d node DatanodeRegistration(127.0.0.1:44705, datanodeUuid=662e5429-60ae-4511-baf9-0321b1fbea7c, infoPort=45093, infoSecurePort=0, ipcPort=43145, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:03:57,654 WARN [Thread-626 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data5/current/BP-2067460977-172.17.0.2-1731297835580/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:57,654 WARN [Thread-627 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data6/current/BP-2067460977-172.17.0.2-1731297835580/current, will proceed with Du for space computation calculation, 2024-11-11T04:03:57,673 WARN [Thread-590 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-11T04:03:57,675 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x564baaf40c8a7f79 with lease ID 0x50104c629c96237a: Processing first storage report for DS-9a700f2d-86dd-4773-897f-da51a4fbb794 from datanode DatanodeRegistration(127.0.0.1:37665, datanodeUuid=c961dbae-8b51-4b94-8975-f4ab35153047, infoPort=40251, infoSecurePort=0, ipcPort=40467, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580) 2024-11-11T04:03:57,676 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x564baaf40c8a7f79 with lease ID 0x50104c629c96237a: from storage DS-9a700f2d-86dd-4773-897f-da51a4fbb794 node DatanodeRegistration(127.0.0.1:37665, datanodeUuid=c961dbae-8b51-4b94-8975-f4ab35153047, infoPort=40251, infoSecurePort=0, ipcPort=40467, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:03:57,676 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x564baaf40c8a7f79 with lease ID 0x50104c629c96237a: Processing first storage report for DS-5de0963e-ffd6-46ea-a0cf-de18870c68bc from datanode DatanodeRegistration(127.0.0.1:37665, datanodeUuid=c961dbae-8b51-4b94-8975-f4ab35153047, infoPort=40251, infoSecurePort=0, ipcPort=40467, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580) 2024-11-11T04:03:57,676 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x564baaf40c8a7f79 with lease ID 0x50104c629c96237a: from storage DS-5de0963e-ffd6-46ea-a0cf-de18870c68bc node DatanodeRegistration(127.0.0.1:37665, datanodeUuid=c961dbae-8b51-4b94-8975-f4ab35153047, infoPort=40251, infoSecurePort=0, ipcPort=40467, storageInfo=lv=-57;cid=testClusterID;nsid=2055328404;c=1731297835580), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T04:03:57,733 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0 2024-11-11T04:03:57,737 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/zookeeper_0, clientPort=63140, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T04:03:57,738 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=63140 2024-11-11T04:03:57,738 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
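[Editor's note] For orientation, the entries above trace the mini HDFS, ZooKeeper and HBase startup requested by the StartMiniClusterOption string logged at the beginning of this section (numMasters=1, numRegionServers=3, numDataNodes=3, numZkServers=1). The following is a minimal, hypothetical sketch of how a test can request such a cluster through the branch-2 HBaseTestingUtility API; the class name MiniClusterStartupSketch is invented for illustration and this is not the actual TestHBaseWalOnEC source.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Mirrors the logged option string: 1 master, 3 region servers,
    // 3 data nodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .createRootDir(false)
        .createWALDir(false)
        .build();
    util.startMiniCluster(option); // brings up mini DFS, mini ZK, then HBase, as traced above
    try {
      // test body would run against the cluster here
    } finally {
      util.shutdownMiniCluster(); // tears down HBase, ZooKeeper and DFS
    }
  }
}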
2024-11-11T04:03:57,740 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:57,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:03:57,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:03:57,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741825_1001 (size=7) 2024-11-11T04:03:57,757 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3 with version=8 2024-11-11T04:03:57,757 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1462): The hbase.fs.tmp.dir is set to hdfs://localhost:32935/user/jenkins/test-data/000b05ab-a8b0-8602-8e68-70f4ea58c4a6/hbase-staging 2024-11-11T04:03:57,760 INFO [Time-limited test {}] client.ConnectionUtils(129): master/388b5ced38b8:0 server-side Connection retries=45 2024-11-11T04:03:57,760 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,761 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,761 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:03:57,761 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,761 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:03:57,761 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:03:57,761 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:03:57,762 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38915 2024-11-11T04:03:57,762 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:57,764 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:57,767 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process 
identifier=master:38915 connecting to ZooKeeper ensemble=127.0.0.1:63140 2024-11-11T04:03:57,818 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:389150x0, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:03:57,818 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38915-0x101295a251d0000 connected 2024-11-11T04:03:57,882 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:03:57,884 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:57,885 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:03:57,886 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38915 2024-11-11T04:03:57,886 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38915 2024-11-11T04:03:57,887 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38915 2024-11-11T04:03:57,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38915 2024-11-11T04:03:57,888 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38915 2024-11-11T04:03:57,889 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3, hbase.cluster.distributed=false 2024-11-11T04:03:57,911 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/388b5ced38b8:0 server-side Connection retries=45 2024-11-11T04:03:57,911 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,911 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,912 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:03:57,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,912 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:03:57,912 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 
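[Editor's note] The master and region server processes above each register with the MiniZooKeeperCluster on 127.0.0.1:63140 (the client port logged earlier; it varies per run). As a hedged illustration only, a client would reach the same ensemble with the standard HBase configuration keys shown below; the class name ClientConnectionSketch is hypothetical and the port is specific to this run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientConnectionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Point the client at the mini ZooKeeper ensemble started above.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 63140);
    try (Connection connection = ConnectionFactory.createConnection(conf)) {
      // Admin/Table handles would be obtained from the connection here.
      System.out.println("connected: " + !connection.isClosed());
    }
  }
}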
2024-11-11T04:03:57,912 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:03:57,912 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:38541 2024-11-11T04:03:57,913 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:03:57,913 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:03:57,915 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:57,917 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:57,919 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:38541 connecting to ZooKeeper ensemble=127.0.0.1:63140 2024-11-11T04:03:57,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:385410x0, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:03:57,924 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38541-0x101295a251d0001 connected 2024-11-11T04:03:57,924 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:03:57,925 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:57,925 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:03:57,926 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38541 2024-11-11T04:03:57,926 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38541 2024-11-11T04:03:57,926 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38541 2024-11-11T04:03:57,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38541 2024-11-11T04:03:57,927 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38541 2024-11-11T04:03:57,943 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/388b5ced38b8:0 server-side Connection retries=45 2024-11-11T04:03:57,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,943 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:03:57,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,943 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:03:57,944 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:03:57,944 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:03:57,944 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:42791 2024-11-11T04:03:57,944 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:03:57,945 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:03:57,946 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:57,947 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:57,950 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:42791 connecting to ZooKeeper ensemble=127.0.0.1:63140 2024-11-11T04:03:57,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:427910x0, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:03:57,957 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42791-0x101295a251d0002 connected 2024-11-11T04:03:57,957 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:03:57,958 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:57,958 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:03:57,959 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42791 2024-11-11T04:03:57,959 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42791 2024-11-11T04:03:57,960 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42791 2024-11-11T04:03:57,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42791 2024-11-11T04:03:57,960 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42791 2024-11-11T04:03:57,975 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/388b5ced38b8:0 server-side Connection retries=45 2024-11-11T04:03:57,975 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,976 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T04:03:57,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T04:03:57,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T04:03:57,976 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T04:03:57,976 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T04:03:57,977 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:32979 2024-11-11T04:03:57,977 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T04:03:57,978 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T04:03:57,978 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:57,980 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:57,982 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:32979 connecting to ZooKeeper ensemble=127.0.0.1:63140 2024-11-11T04:03:57,990 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:329790x0, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T04:03:57,990 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:329790x0, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T04:03:57,990 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:32979-0x101295a251d0003 connected 2024-11-11T04:03:57,991 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:03:57,992 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T04:03:57,992 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32979 2024-11-11T04:03:57,993 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32979 2024-11-11T04:03:57,993 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32979 2024-11-11T04:03:57,993 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32979 2024-11-11T04:03:57,993 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32979 2024-11-11T04:03:57,994 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/388b5ced38b8,38915,1731297837759 2024-11-11T04:03:57,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:57,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:57,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:57,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:57,999 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/388b5ced38b8,38915,1731297837759 2024-11-11T04:03:58,006 DEBUG [M:0;388b5ced38b8:38915 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;388b5ced38b8:38915 2024-11-11T04:03:58,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:03:58,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 
2024-11-11T04:03:58,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:03:58,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T04:03:58,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,007 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,008 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:03:58,008 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/388b5ced38b8,38915,1731297837759 from backup master directory 2024-11-11T04:03:58,008 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T04:03:58,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:58,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:58,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:58,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/388b5ced38b8,38915,1731297837759 2024-11-11T04:03:58,016 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-11T04:03:58,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T04:03:58,016 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=388b5ced38b8,38915,1731297837759 2024-11-11T04:03:58,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:03:58,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:03:58,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741826_1002 (size=42) 2024-11-11T04:03:58,035 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/hbase.id with ID: d1901d92-6655-4972-9529-f4542e15d98c 2024-11-11T04:03:58,049 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T04:03:58,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:03:58,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:03:58,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741827_1003 (size=196) 2024-11-11T04:03:58,069 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:03:58,069 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-11T04:03:58,070 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:03:58,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:03:58,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:03:58,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741828_1004 (size=1189) 2024-11-11T04:03:58,083 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir 
hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store 2024-11-11T04:03:58,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:03:58,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:03:58,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741829_1005 (size=34) 2024-11-11T04:03:58,093 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:58,093 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T04:03:58,093 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:03:58,093 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:03:58,093 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T04:03:58,093 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:03:58,093 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T04:03:58,093 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-11T04:03:58,094 WARN [master/388b5ced38b8:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/.initializing 2024-11-11T04:03:58,094 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/WALs/388b5ced38b8,38915,1731297837759 2024-11-11T04:03:58,098 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=388b5ced38b8%2C38915%2C1731297837759, suffix=, logDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/WALs/388b5ced38b8,38915,1731297837759, archiveDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/oldWALs, maxLogs=10 2024-11-11T04:03:58,100 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 388b5ced38b8%2C38915%2C1731297837759.1731297838100 2024-11-11T04:03:58,101 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(752): Using builder API via reflection for DFS file creation replicate flag. 
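[Editor's note] The entries above show the master's local "master:store" region being created and its WAL being instantiated through FSHLogProvider with blocksize=256 MB, rollsize=128 MB, maxLogs=10. As a hedged sketch only, the commonly documented configuration keys below influence this kind of WAL setup; the values are illustrative, not what this test run actually set, and the master's local region may apply its own overrides.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalProviderConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects the FSHLogProvider seen in the log
    // (as opposed to, e.g., "asyncfs" for AsyncFSWALProvider).
    conf.set("hbase.wal.provider", "filesystem");
    // Example WAL block size and retained-log count; purely illustrative values.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setInt("hbase.regionserver.maxlogs", 10);
    System.out.println("provider=" + conf.get("hbase.wal.provider"));
  }
}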
2024-11-11T04:03:58,101 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] util.CommonFSUtils$DfsBuilderUtility(762): Using builder API via reflection for DFS file creation noLocalWrite flag. 2024-11-11T04:03:58,112 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/WALs/388b5ced38b8,38915,1731297837759/388b5ced38b8%2C38915%2C1731297837759.1731297838100 2024-11-11T04:03:58,112 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43023:43023),(127.0.0.1/127.0.0.1:45093:45093),(127.0.0.1/127.0.0.1:40251:40251)] 2024-11-11T04:03:58,112 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:03:58,113 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:58,113 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:58,113 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:58,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:58,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T04:03:58,117 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:58,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:58,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:58,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T04:03:58,119 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:58,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:03:58,120 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:58,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T04:03:58,122 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:58,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:03:58,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:58,125 INFO 
[StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T04:03:58,125 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:58,126 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:03:58,127 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:58,128 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:58,130 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
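Note on the FlushLargeStoresPolicy entry above: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the lower bound falls back to the region's memstore flush size divided by its number of column families. A worked sketch of that arithmetic, assuming the 128 MB flushSize injected earlier in this log and the four families (info, proc, rs, state) of master:store:

// Fallback arithmetic only; values are taken or inferred from the log above.
long memstoreFlushSize = 134217728L;            // 128 MB flushSize (assumption from the log)
int families = 4;                               // info, proc, rs, state
long lowerBound = memstoreFlushSize / families; // 33554432 bytes, i.e. the "32.0 M" printed above

This matches the FlushLargeStoresPolicy{flushSizeLowerBound=33554432} value printed when the region opens in the next entries.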
2024-11-11T04:03:58,132 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T04:03:58,134 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:03:58,135 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73845679, jitterRate=0.1003863662481308}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T04:03:58,136 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-11T04:03:58,136 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T04:03:58,141 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7578656c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:03:58,142 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-11-11T04:03:58,142 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T04:03:58,142 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T04:03:58,142 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T04:03:58,143 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 0 msec 2024-11-11T04:03:58,143 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 0 msec 2024-11-11T04:03:58,143 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T04:03:58,145 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
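Note on the "Opened 1595e783b53d99cd5eef43b6debb2682" entry above: desiredMaxFileSize=73845679 with jitterRate=0.1003863662481308 is the split threshold after jitter, i.e. base max file size × (1 + jitterRate). A short check, assuming a 64 MB base hbase.hregion.max.filesize in this test configuration (the base is inferred, not read from the log):

// Arithmetic check only.
long maxFileSize = 64L * 1024 * 1024;                          // 67108864, assumed base
double jitterRate = 0.1003863662481308;                        // from the log entry above
long desired = Math.round(maxFileSize * (1.0 + jitterRate));   // ≈ 73845679, as logged

The hbase:meta region opened later in this log shows the same relationship with a negative jitterRate (62136530 ≈ 67108864 × (1 - 0.0741)).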
2024-11-11T04:03:58,146 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T04:03:58,156 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-11T04:03:58,157 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T04:03:58,157 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T04:03:58,165 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-11T04:03:58,165 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T04:03:58,166 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T04:03:58,173 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-11T04:03:58,174 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T04:03:58,182 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T04:03:58,183 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T04:03:58,190 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T04:03:58,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:58,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:58,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:58,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-11T04:03:58,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,198 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T04:03:58,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,199 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=388b5ced38b8,38915,1731297837759, sessionid=0x101295a251d0000, setting cluster-up flag (Was=false) 2024-11-11T04:03:58,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,240 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T04:03:58,241 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=388b5ced38b8,38915,1731297837759 2024-11-11T04:03:58,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,282 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T04:03:58,283 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=388b5ced38b8,38915,1731297837759 2024-11-11T04:03:58,286 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-11T04:03:58,287 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-11T04:03:58,287 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-11T04:03:58,287 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 388b5ced38b8,38915,1731297837759 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T04:03:58,287 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/388b5ced38b8:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:03:58,287 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/388b5ced38b8:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:03:58,287 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/388b5ced38b8:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:03:58,287 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/388b5ced38b8:0, corePoolSize=5, maxPoolSize=5 2024-11-11T04:03:58,287 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/388b5ced38b8:0, corePoolSize=10, maxPoolSize=10 2024-11-11T04:03:58,288 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,288 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/388b5ced38b8:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:03:58,288 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting 
executor service name=MASTER_TABLE_OPERATIONS-master/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,289 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731297868288 2024-11-11T04:03:58,289 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T04:03:58,289 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T04:03:58,289 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T04:03:58,289 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T04:03:58,289 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T04:03:58,289 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T04:03:58,289 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,290 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:03:58,290 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T04:03:58,290 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-11T04:03:58,290 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T04:03:58,290 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T04:03:58,290 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T04:03:58,290 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T04:03:58,290 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.large.0-1731297838290,5,FailOnTimeoutGroup] 2024-11-11T04:03:58,291 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.small.0-1731297838290,5,FailOnTimeoutGroup] 2024-11-11T04:03:58,291 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,291 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T04:03:58,291 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,291 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,291 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:58,291 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T04:03:58,307 DEBUG [RS:1;388b5ced38b8:42791 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;388b5ced38b8:42791 2024-11-11T04:03:58,307 DEBUG [RS:0;388b5ced38b8:38541 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;388b5ced38b8:38541 2024-11-11T04:03:58,307 DEBUG [RS:2;388b5ced38b8:32979 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;388b5ced38b8:32979 2024-11-11T04:03:58,308 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer(1008): ClusterId : d1901d92-6655-4972-9529-f4542e15d98c 2024-11-11T04:03:58,308 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:03:58,308 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1008): ClusterId : d1901d92-6655-4972-9529-f4542e15d98c 2024-11-11T04:03:58,308 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1008): ClusterId : d1901d92-6655-4972-9529-f4542e15d98c 2024-11-11T04:03:58,308 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:03:58,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741831_1007 (size=1039) 2024-11-11T04:03:58,308 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T04:03:58,309 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741831_1007 (size=1039) 2024-11-11T04:03:58,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741831_1007 (size=1039) 2024-11-11T04:03:58,311 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-11T04:03:58,311 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3 2024-11-11T04:03:58,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:03:58,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:03:58,322 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741832_1008 (size=32) 2024-11-11T04:03:58,323 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:58,324 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:03:58,324 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:03:58,324 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:03:58,324 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:03:58,324 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T04:03:58,324 DEBUG 
[RS:1;388b5ced38b8:42791 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T04:03:58,327 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:03:58,328 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:03:58,329 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:58,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:58,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:03:58,331 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:03:58,331 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:58,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:58,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 
1588230740 2024-11-11T04:03:58,333 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:03:58,333 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:58,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:58,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740 2024-11-11T04:03:58,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740 2024-11-11T04:03:58,337 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
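Note on the hbase:meta descriptor logged above (families info, rep_barrier and table with ROW_INDEX_V1 encoding, ROWCOL bloom filters, IN_MEMORY and 8 KB / 64 KB block sizes): that descriptor is created internally by FSTableDescriptors, but the same family attributes can be expressed through the public client API for an ordinary table. The sketch below is purely illustrative; the table name "example" and the single family are placeholders, not something this test creates.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // Attribute values copied from the 'info' family printed in the log above.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build())
        .build();
    System.out.println(td);
  }
}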
2024-11-11T04:03:58,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-11T04:03:58,340 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:03:58,340 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:03:58,341 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T04:03:58,341 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:03:58,341 DEBUG [RS:1;388b5ced38b8:42791 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8df20c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:03:58,341 DEBUG [RS:2;388b5ced38b8:32979 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72e93583, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:03:58,341 DEBUG [RS:0;388b5ced38b8:38541 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6109ebfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:03:58,341 DEBUG [RS:1;388b5ced38b8:42791 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65fd7958, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=388b5ced38b8/172.17.0.2:0 2024-11-11T04:03:58,341 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-11T04:03:58,341 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62136530, jitterRate=-0.07409355044364929}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T04:03:58,341 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-11T04:03:58,342 DEBUG [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer(1090): About to register with Master. 
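Note on the "System coprocessor loading is enabled" / "Table coprocessor loading is enabled" pair logged per region server above: these correspond to two boolean flags, both of which I believe default to true. Shown only as a configuration sketch; nothing in this log indicates the test sets them explicitly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

Configuration conf = HBaseConfiguration.create();
conf.setBoolean("hbase.coprocessor.enabled", true);      // system coprocessor loading
conf.setBoolean("hbase.coprocessor.user.enabled", true); // table (user) coprocessor loading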
2024-11-11T04:03:58,342 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer(3073): reportForDuty to master=388b5ced38b8,38915,1731297837759 with isa=388b5ced38b8/172.17.0.2:42791, startcode=1731297837943 2024-11-11T04:03:58,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-11T04:03:58,342 DEBUG [RS:1;388b5ced38b8:42791 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:03:58,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:03:58,342 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-11T04:03:58,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-11T04:03:58,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:03:58,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:03:58,343 DEBUG [RS:2;388b5ced38b8:32979 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fee41b1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=388b5ced38b8/172.17.0.2:0 2024-11-11T04:03:58,343 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-11T04:03:58,343 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-11T04:03:58,343 DEBUG [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-11T04:03:58,343 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(3073): reportForDuty to master=388b5ced38b8,38915,1731297837759 with isa=388b5ced38b8/172.17.0.2:32979, startcode=1731297837975 2024-11-11T04:03:58,343 DEBUG [RS:2;388b5ced38b8:32979 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:03:58,350 DEBUG [RS:0;388b5ced38b8:38541 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d82257f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=388b5ced38b8/172.17.0.2:0 2024-11-11T04:03:58,350 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-11T04:03:58,350 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-11T04:03:58,350 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-11T04:03:58,350 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-11T04:03:58,350 DEBUG [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-11-11T04:03:58,351 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(3073): reportForDuty to master=388b5ced38b8,38915,1731297837759 with isa=388b5ced38b8/172.17.0.2:38541, startcode=1731297837911 2024-11-11T04:03:58,351 DEBUG [RS:0;388b5ced38b8:38541 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T04:03:58,352 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-11T04:03:58,352 INFO [RS-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56703, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:03:58,352 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-11T04:03:58,352 INFO [RS-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33677, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:03:58,352 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T04:03:58,352 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38915 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 388b5ced38b8,42791,1731297837943 2024-11-11T04:03:58,352 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38915 {}] master.ServerManager(486): Registering regionserver=388b5ced38b8,42791,1731297837943 2024-11-11T04:03:58,353 INFO [RS-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33481, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T04:03:58,354 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T04:03:58,354 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38915 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 388b5ced38b8,32979,1731297837975 2024-11-11T04:03:58,354 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38915 {}] master.ServerManager(486): Registering regionserver=388b5ced38b8,32979,1731297837975 2024-11-11T04:03:58,354 DEBUG [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3 2024-11-11T04:03:58,354 DEBUG [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34003 2024-11-11T04:03:58,354 DEBUG [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-11T04:03:58,355 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T04:03:58,356 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38915 {}] master.ServerManager(332): 
Checking decommissioned status of RegionServer 388b5ced38b8,38541,1731297837911 2024-11-11T04:03:58,356 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38915 {}] master.ServerManager(486): Registering regionserver=388b5ced38b8,38541,1731297837911 2024-11-11T04:03:58,356 DEBUG [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3 2024-11-11T04:03:58,356 DEBUG [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34003 2024-11-11T04:03:58,356 DEBUG [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-11T04:03:58,357 DEBUG [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3 2024-11-11T04:03:58,357 DEBUG [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:34003 2024-11-11T04:03:58,357 DEBUG [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-11T04:03:58,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:03:58,394 DEBUG [RS:1;388b5ced38b8:42791 {}] zookeeper.ZKUtil(111): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/388b5ced38b8,42791,1731297837943 2024-11-11T04:03:58,394 WARN [RS:1;388b5ced38b8:42791 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:03:58,394 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [388b5ced38b8,38541,1731297837911] 2024-11-11T04:03:58,394 DEBUG [RS:2;388b5ced38b8:32979 {}] zookeeper.ZKUtil(111): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/388b5ced38b8,32979,1731297837975 2024-11-11T04:03:58,394 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [388b5ced38b8,42791,1731297837943] 2024-11-11T04:03:58,394 INFO [RS:1;388b5ced38b8:42791 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:03:58,394 DEBUG [RS:0;388b5ced38b8:38541 {}] zookeeper.ZKUtil(111): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/388b5ced38b8,38541,1731297837911 2024-11-11T04:03:58,394 WARN [RS:2;388b5ced38b8:32979 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T04:03:58,394 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [388b5ced38b8,32979,1731297837975] 2024-11-11T04:03:58,394 WARN [RS:0;388b5ced38b8:38541 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
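Note on the RegionServerTracker / ZKUtil entries above: each region server registers an ephemeral child under /hbase/rs, and the master watches that parent znode for additions and removals. A minimal sketch with the plain Apache ZooKeeper client against the quorum address shown in this log; the empty watcher and 30 s session timeout are assumptions for illustration only.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;
import java.util.List;

public class RsTrackerSketch {
  public static void main(String[] args) throws Exception {
    // Quorum address taken from the log; session timeout is an assumption.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:63140", 30000, (WatchedEvent e) -> {});
    // Watch the /hbase/rs children, as RegionServerTracker does for ephemeral RS nodes.
    List<String> servers = zk.getChildren("/hbase/rs", true);
    servers.forEach(System.out::println); // e.g. 388b5ced38b8,42791,1731297837943
    zk.close();
  }
}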
2024-11-11T04:03:58,394 INFO [RS:2;388b5ced38b8:32979 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:03:58,394 INFO [RS:0;388b5ced38b8:38541 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:03:58,394 DEBUG [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,38541,1731297837911 2024-11-11T04:03:58,394 DEBUG [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,42791,1731297837943 2024-11-11T04:03:58,394 DEBUG [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,32979,1731297837975 2024-11-11T04:03:58,398 DEBUG [RS:1;388b5ced38b8:42791 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-11T04:03:58,398 DEBUG [RS:2;388b5ced38b8:32979 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-11T04:03:58,398 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:03:58,398 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:03:58,401 DEBUG [RS:0;388b5ced38b8:38541 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-11T04:03:58,401 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T04:03:58,404 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:03:58,405 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:03:58,405 INFO [RS:1;388b5ced38b8:42791 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:03:58,405 INFO [RS:1;388b5ced38b8:42791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,407 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-11T04:03:58,407 INFO [RS:2;388b5ced38b8:32979 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:03:58,407 INFO [RS:2;388b5ced38b8:32979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,408 INFO [RS:1;388b5ced38b8:42791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
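Note on the MemStoreFlusher entries above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M): these follow the usual heap-fraction formula, limit = heap × hbase.regionserver.global.memstore.size (default 0.4) and low mark = limit × hbase.regionserver.global.memstore.size.lower.limit (default 0.95). A short check, with the ~2200 MB heap inferred from the logged limit rather than read from the log:

// Worked arithmetic only; the heap size is an inference.
double heapMb = 2200.0;                  // assumed: 880 / 0.4
double globalLimitMb = heapMb * 0.40;    // 880 M, as logged
double lowMarkMb = globalLimitMb * 0.95; // 836 M, as logged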
2024-11-11T04:03:58,408 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,408 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,408 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,408 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,408 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,408 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/388b5ced38b8:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:03:58,409 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,409 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,409 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,409 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,409 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,409 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:58,409 DEBUG [RS:1;388b5ced38b8:42791 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:58,410 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-11T04:03:58,411 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T04:03:58,411 INFO [RS:2;388b5ced38b8:32979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/388b5ced38b8:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:58,412 DEBUG [RS:2;388b5ced38b8:32979 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:58,413 INFO [RS:0;388b5ced38b8:38541 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T04:03:58,413 INFO [RS:0;388b5ced38b8:38541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,414 INFO [RS:2;388b5ced38b8:32979 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,414 INFO [RS:2;388b5ced38b8:32979 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,414 INFO [RS:1;388b5ced38b8:42791 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-11T04:03:58,414 INFO [RS:2;388b5ced38b8:32979 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,414 INFO [RS:1;388b5ced38b8:42791 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,414 INFO [RS:2;388b5ced38b8:32979 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,414 INFO [RS:1;388b5ced38b8:42791 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,414 INFO [RS:2;388b5ced38b8:32979 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,32979,1731297837975-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:03:58,415 INFO [RS:1;388b5ced38b8:42791 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,415 INFO [RS:1;388b5ced38b8:42791 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,42791,1731297837943-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:03:58,418 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-11T04:03:58,419 INFO [RS:0;388b5ced38b8:38541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,419 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,420 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,420 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,420 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,420 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,420 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/388b5ced38b8:0, corePoolSize=2, maxPoolSize=2 2024-11-11T04:03:58,420 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,420 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,420 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,420 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/388b5ced38b8:0, 
corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,420 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/388b5ced38b8:0, corePoolSize=1, maxPoolSize=1 2024-11-11T04:03:58,420 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:58,421 DEBUG [RS:0;388b5ced38b8:38541 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0, corePoolSize=3, maxPoolSize=3 2024-11-11T04:03:58,421 INFO [RS:0;388b5ced38b8:38541 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,422 INFO [RS:0;388b5ced38b8:38541 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,422 INFO [RS:0;388b5ced38b8:38541 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,422 INFO [RS:0;388b5ced38b8:38541 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,422 INFO [RS:0;388b5ced38b8:38541 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,38541,1731297837911-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T04:03:58,432 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:03:58,432 INFO [RS:2;388b5ced38b8:32979 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,32979,1731297837975-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,437 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:03:58,438 INFO [RS:1;388b5ced38b8:42791 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,42791,1731297837943-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:58,441 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T04:03:58,441 INFO [RS:0;388b5ced38b8:38541 {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,38541,1731297837911-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
2024-11-11T04:03:58,446 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.Replication(204): 388b5ced38b8,32979,1731297837975 started 2024-11-11T04:03:58,447 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1767): Serving as 388b5ced38b8,32979,1731297837975, RpcServer on 388b5ced38b8/172.17.0.2:32979, sessionid=0x101295a251d0003 2024-11-11T04:03:58,447 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:03:58,447 DEBUG [RS:2;388b5ced38b8:32979 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 388b5ced38b8,32979,1731297837975 2024-11-11T04:03:58,447 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,32979,1731297837975' 2024-11-11T04:03:58,447 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:03:58,447 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:03:58,448 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:03:58,448 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:03:58,448 DEBUG [RS:2;388b5ced38b8:32979 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 388b5ced38b8,32979,1731297837975 2024-11-11T04:03:58,448 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,32979,1731297837975' 2024-11-11T04:03:58,448 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:03:58,448 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:03:58,449 DEBUG [RS:2;388b5ced38b8:32979 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:03:58,449 INFO [RS:2;388b5ced38b8:32979 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:03:58,449 INFO [RS:2;388b5ced38b8:32979 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-11T04:03:58,457 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.Replication(204): 388b5ced38b8,42791,1731297837943 started 2024-11-11T04:03:58,457 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer(1767): Serving as 388b5ced38b8,42791,1731297837943, RpcServer on 388b5ced38b8/172.17.0.2:42791, sessionid=0x101295a251d0002 2024-11-11T04:03:58,458 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:03:58,458 DEBUG [RS:1;388b5ced38b8:42791 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 388b5ced38b8,42791,1731297837943 2024-11-11T04:03:58,458 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,42791,1731297837943' 2024-11-11T04:03:58,458 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:03:58,458 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:03:58,459 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:03:58,459 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:03:58,459 DEBUG [RS:1;388b5ced38b8:42791 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 388b5ced38b8,42791,1731297837943 2024-11-11T04:03:58,459 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,42791,1731297837943' 2024-11-11T04:03:58,459 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:03:58,459 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:03:58,460 DEBUG [RS:1;388b5ced38b8:42791 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:03:58,460 INFO [RS:1;388b5ced38b8:42791 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:03:58,460 INFO [RS:1;388b5ced38b8:42791 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-11T04:03:58,460 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.Replication(204): 388b5ced38b8,38541,1731297837911 started 2024-11-11T04:03:58,460 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1767): Serving as 388b5ced38b8,38541,1731297837911, RpcServer on 388b5ced38b8/172.17.0.2:38541, sessionid=0x101295a251d0001 2024-11-11T04:03:58,460 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T04:03:58,460 DEBUG [RS:0;388b5ced38b8:38541 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 388b5ced38b8,38541,1731297837911 2024-11-11T04:03:58,460 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,38541,1731297837911' 2024-11-11T04:03:58,460 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T04:03:58,460 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T04:03:58,461 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T04:03:58,461 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T04:03:58,461 DEBUG [RS:0;388b5ced38b8:38541 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 388b5ced38b8,38541,1731297837911 2024-11-11T04:03:58,461 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '388b5ced38b8,38541,1731297837911' 2024-11-11T04:03:58,461 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T04:03:58,461 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T04:03:58,461 DEBUG [RS:0;388b5ced38b8:38541 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T04:03:58,461 INFO [RS:0;388b5ced38b8:38541 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T04:03:58,461 INFO [RS:0;388b5ced38b8:38541 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-11T04:03:58,506 WARN [388b5ced38b8:38915 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 
2024-11-11T04:03:58,552 INFO [RS:2;388b5ced38b8:32979 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=388b5ced38b8%2C32979%2C1731297837975, suffix=, logDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,32979,1731297837975, archiveDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/oldWALs, maxLogs=32 2024-11-11T04:03:58,556 INFO [RS:2;388b5ced38b8:32979 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 388b5ced38b8%2C32979%2C1731297837975.1731297838556 2024-11-11T04:03:58,562 INFO [RS:1;388b5ced38b8:42791 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=388b5ced38b8%2C42791%2C1731297837943, suffix=, logDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,42791,1731297837943, archiveDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/oldWALs, maxLogs=32 2024-11-11T04:03:58,564 INFO [RS:1;388b5ced38b8:42791 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 388b5ced38b8%2C42791%2C1731297837943.1731297838564 2024-11-11T04:03:58,565 INFO [RS:0;388b5ced38b8:38541 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=388b5ced38b8%2C38541%2C1731297837911, suffix=, logDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,38541,1731297837911, archiveDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/oldWALs, maxLogs=32 2024-11-11T04:03:58,566 INFO [RS:0;388b5ced38b8:38541 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 388b5ced38b8%2C38541%2C1731297837911.1731297838566 2024-11-11T04:03:58,567 INFO [RS:2;388b5ced38b8:32979 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,32979,1731297837975/388b5ced38b8%2C32979%2C1731297837975.1731297838556 2024-11-11T04:03:58,567 DEBUG [RS:2;388b5ced38b8:32979 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40251:40251),(127.0.0.1/127.0.0.1:43023:43023),(127.0.0.1/127.0.0.1:45093:45093)] 2024-11-11T04:03:58,574 INFO [RS:1;388b5ced38b8:42791 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,42791,1731297837943/388b5ced38b8%2C42791%2C1731297837943.1731297838564 2024-11-11T04:03:58,574 DEBUG [RS:1;388b5ced38b8:42791 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43023:43023),(127.0.0.1/127.0.0.1:40251:40251),(127.0.0.1/127.0.0.1:45093:45093)] 2024-11-11T04:03:58,575 INFO [RS:0;388b5ced38b8:38541 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,38541,1731297837911/388b5ced38b8%2C38541%2C1731297837911.1731297838566 2024-11-11T04:03:58,575 DEBUG [RS:0;388b5ced38b8:38541 {}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45093:45093),(127.0.0.1/127.0.0.1:43023:43023),(127.0.0.1/127.0.0.1:40251:40251)] 2024-11-11T04:03:58,756 DEBUG [388b5ced38b8:38915 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-11T04:03:58,756 DEBUG [388b5ced38b8:38915 {}] balancer.BalancerClusterState(202): Hosts are {388b5ced38b8=0} racks are {/default-rack=0} 2024-11-11T04:03:58,760 DEBUG [388b5ced38b8:38915 {}] 
balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:03:58,760 DEBUG [388b5ced38b8:38915 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-11-11T04:03:58,760 DEBUG [388b5ced38b8:38915 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-11-11T04:03:58,760 INFO [388b5ced38b8:38915 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:03:58,760 INFO [388b5ced38b8:38915 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:03:58,760 INFO [388b5ced38b8:38915 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:03:58,760 DEBUG [388b5ced38b8:38915 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T04:03:58,761 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=388b5ced38b8,38541,1731297837911 2024-11-11T04:03:58,764 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 388b5ced38b8,38541,1731297837911, state=OPENING 2024-11-11T04:03:58,798 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T04:03:58,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:58,809 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=388b5ced38b8,38541,1731297837911}] 2024-11-11T04:03:58,809 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:58,809 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:58,809 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:58,809 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:58,966 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 388b5ced38b8,38541,1731297837911 2024-11-11T04:03:58,966 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:03:58,968 INFO [RS-EventLoopGroup-8-2 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:59066, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:03:58,972 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-11T04:03:58,973 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-11T04:03:58,975 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=388b5ced38b8%2C38541%2C1731297837911.meta, suffix=.meta, logDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,38541,1731297837911, archiveDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/oldWALs, maxLogs=32 2024-11-11T04:03:58,977 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 388b5ced38b8%2C38541%2C1731297837911.meta.1731297838977.meta 2024-11-11T04:03:58,989 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,38541,1731297837911/388b5ced38b8%2C38541%2C1731297837911.meta.1731297838977.meta 2024-11-11T04:03:58,989 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43023:43023),(127.0.0.1/127.0.0.1:40251:40251),(127.0.0.1/127.0.0.1:45093:45093)] 2024-11-11T04:03:58,989 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:03:58,990 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T04:03:58,990 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T04:03:58,990 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-11T04:03:58,990 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T04:03:58,990 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:58,990 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-11T04:03:58,990 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-11T04:03:58,992 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T04:03:58,994 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T04:03:58,994 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:58,994 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:58,995 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T04:03:58,996 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T04:03:58,996 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:58,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:58,997 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T04:03:58,998 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T04:03:58,999 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:58,999 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T04:03:59,001 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740 2024-11-11T04:03:59,003 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740 2024-11-11T04:03:59,004 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-11-11T04:03:59,006 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-11T04:03:59,007 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74759821, jitterRate=0.11400814354419708}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-11-11T04:03:59,008 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-11T04:03:59,010 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731297838965 2024-11-11T04:03:59,013 DEBUG [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T04:03:59,013 INFO [RS_OPEN_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-11T04:03:59,014 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=388b5ced38b8,38541,1731297837911 2024-11-11T04:03:59,016 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 388b5ced38b8,38541,1731297837911, state=OPEN 2024-11-11T04:03:59,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:03:59,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:03:59,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:03:59,024 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T04:03:59,024 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:59,024 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:59,024 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:59,024 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T04:03:59,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished 
subprocedure pid=3, resume processing ppid=2 2024-11-11T04:03:59,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=388b5ced38b8,38541,1731297837911 in 215 msec 2024-11-11T04:03:59,031 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T04:03:59,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 676 msec 2024-11-11T04:03:59,035 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 747 msec 2024-11-11T04:03:59,035 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731297839035, completionTime=-1 2024-11-11T04:03:59,035 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-11T04:03:59,035 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-11T04:03:59,036 DEBUG [hconnection-0x6f692ca-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:03:59,038 INFO [RS-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59076, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:03:59,040 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-11-11T04:03:59,040 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731297899040 2024-11-11T04:03:59,040 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731297959040 2024-11-11T04:03:59,040 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 4 msec 2024-11-11T04:03:59,057 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,38915,1731297837759-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:59,057 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,38915,1731297837759-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:59,057 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,38915,1731297837759-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:59,057 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-388b5ced38b8:38915, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T04:03:59,057 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:59,058 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 2024-11-11T04:03:59,058 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T04:03:59,060 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-11T04:03:59,060 DEBUG [master/388b5ced38b8:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-11T04:03:59,061 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T04:03:59,061 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:59,063 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T04:03:59,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741837_1013 (size=358) 2024-11-11T04:03:59,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741837_1013 (size=358) 2024-11-11T04:03:59,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741837_1013 (size=358) 2024-11-11T04:03:59,084 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9cbdba57d6e499a98b815c1373b16dad, NAME => 'hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3 2024-11-11T04:03:59,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741838_1014 (size=42) 2024-11-11T04:03:59,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741838_1014 (size=42) 2024-11-11T04:03:59,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is 
added to blk_1073741838_1014 (size=42) 2024-11-11T04:03:59,102 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:59,102 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 9cbdba57d6e499a98b815c1373b16dad, disabling compactions & flushes 2024-11-11T04:03:59,102 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 2024-11-11T04:03:59,102 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 2024-11-11T04:03:59,102 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. after waiting 0 ms 2024-11-11T04:03:59,102 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 2024-11-11T04:03:59,102 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 2024-11-11T04:03:59,102 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9cbdba57d6e499a98b815c1373b16dad: 2024-11-11T04:03:59,104 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T04:03:59,104 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1731297839104"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731297839104"}]},"ts":"1731297839104"} 2024-11-11T04:03:59,108 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-11-11T04:03:59,110 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T04:03:59,110 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731297839110"}]},"ts":"1731297839110"} 2024-11-11T04:03:59,113 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-11T04:03:59,132 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {388b5ced38b8=0} racks are {/default-rack=0} 2024-11-11T04:03:59,133 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:03:59,133 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-11-11T04:03:59,133 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-11-11T04:03:59,133 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:03:59,133 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:03:59,133 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:03:59,133 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T04:03:59,134 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=9cbdba57d6e499a98b815c1373b16dad, ASSIGN}] 2024-11-11T04:03:59,136 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=9cbdba57d6e499a98b815c1373b16dad, ASSIGN 2024-11-11T04:03:59,137 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=9cbdba57d6e499a98b815c1373b16dad, ASSIGN; state=OFFLINE, location=388b5ced38b8,32979,1731297837975; forceNewPlan=false, retain=false 2024-11-11T04:03:59,288 INFO [388b5ced38b8:38915 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-11T04:03:59,288 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=9cbdba57d6e499a98b815c1373b16dad, regionState=OPENING, regionLocation=388b5ced38b8,32979,1731297837975 2024-11-11T04:03:59,292 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 9cbdba57d6e499a98b815c1373b16dad, server=388b5ced38b8,32979,1731297837975}] 2024-11-11T04:03:59,445 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 388b5ced38b8,32979,1731297837975 2024-11-11T04:03:59,446 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T04:03:59,448 INFO [RS-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48392, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T04:03:59,453 INFO [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 2024-11-11T04:03:59,453 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 9cbdba57d6e499a98b815c1373b16dad, NAME => 'hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:03:59,454 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 9cbdba57d6e499a98b815c1373b16dad 2024-11-11T04:03:59,454 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:59,454 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 9cbdba57d6e499a98b815c1373b16dad 2024-11-11T04:03:59,454 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 9cbdba57d6e499a98b815c1373b16dad 2024-11-11T04:03:59,456 INFO [StoreOpener-9cbdba57d6e499a98b815c1373b16dad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9cbdba57d6e499a98b815c1373b16dad 2024-11-11T04:03:59,458 INFO [StoreOpener-9cbdba57d6e499a98b815c1373b16dad-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output 
for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9cbdba57d6e499a98b815c1373b16dad columnFamilyName info 2024-11-11T04:03:59,458 DEBUG [StoreOpener-9cbdba57d6e499a98b815c1373b16dad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:59,458 INFO [StoreOpener-9cbdba57d6e499a98b815c1373b16dad-1 {}] regionserver.HStore(327): Store=9cbdba57d6e499a98b815c1373b16dad/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:03:59,460 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/namespace/9cbdba57d6e499a98b815c1373b16dad 2024-11-11T04:03:59,460 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/namespace/9cbdba57d6e499a98b815c1373b16dad 2024-11-11T04:03:59,463 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 9cbdba57d6e499a98b815c1373b16dad 2024-11-11T04:03:59,468 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/namespace/9cbdba57d6e499a98b815c1373b16dad/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:03:59,469 INFO [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 9cbdba57d6e499a98b815c1373b16dad; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62971606, jitterRate=-0.06164994835853577}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T04:03:59,470 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 9cbdba57d6e499a98b815c1373b16dad: 2024-11-11T04:03:59,472 INFO [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad., pid=6, masterSystemTime=1731297839445 2024-11-11T04:03:59,475 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 2024-11-11T04:03:59,475 INFO [RS_OPEN_PRIORITY_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 
2024-11-11T04:03:59,476 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=9cbdba57d6e499a98b815c1373b16dad, regionState=OPEN, openSeqNum=2, regionLocation=388b5ced38b8,32979,1731297837975 2024-11-11T04:03:59,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T04:03:59,484 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 9cbdba57d6e499a98b815c1373b16dad, server=388b5ced38b8,32979,1731297837975 in 188 msec 2024-11-11T04:03:59,487 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T04:03:59,487 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=9cbdba57d6e499a98b815c1373b16dad, ASSIGN in 348 msec 2024-11-11T04:03:59,488 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T04:03:59,489 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731297839489"}]},"ts":"1731297839489"} 2024-11-11T04:03:59,492 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-11T04:03:59,534 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T04:03:59,536 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 476 msec 2024-11-11T04:03:59,561 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-11T04:03:59,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:59,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:59,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:59,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-11T04:03:59,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:03:59,571 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:03:59,572 INFO [RS-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48408, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:03:59,575 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-11T04:03:59,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-11T04:03:59,602 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 27 msec 2024-11-11T04:03:59,608 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-11T04:03:59,623 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-11T04:03:59,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 26 msec 2024-11-11T04:03:59,657 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-11T04:03:59,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-11T04:03:59,674 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 1.657sec 2024-11-11T04:03:59,674 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T04:03:59,674 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T04:03:59,674 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T04:03:59,674 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T04:03:59,674 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T04:03:59,675 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,38915,1731297837759-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
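The SIMPLE-auth ClientService connections and the ZooKeeper quorum at 127.0.0.1:63140 seen in these entries come from client-side Connection objects. A minimal sketch of how such a connection is opened, assuming only the quorum host and client port taken from the log (everything else is a generic default):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ConnectSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // ZooKeeper ensemble registered by the minicluster (quorum 127.0.0.1:63140 above).
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "63140");
    // ConnectionFactory locates the active master and region servers via ZooKeeper;
    // this is what the ReadOnlyZKClient / RpcConnection entries correspond to.
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      System.out.println("connected, cluster id = " + admin.getClusterMetrics().getClusterId());
    }
  }
}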
2024-11-11T04:03:59,675 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,38915,1731297837759-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T04:03:59,678 DEBUG [master/388b5ced38b8:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-11T04:03:59,678 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T04:03:59,679 INFO [master/388b5ced38b8:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=388b5ced38b8,38915,1731297837759-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T04:03:59,699 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b5329c9 to 127.0.0.1:63140 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@524aaf60 2024-11-11T04:03:59,708 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16728b6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T04:03:59,710 DEBUG [hconnection-0x11c4176e-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:03:59,717 INFO [RS-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59090, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:03:59,719 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=388b5ced38b8,38915,1731297837759 2024-11-11T04:03:59,723 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T04:03:59,725 INFO [RS-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44096, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T04:03:59,727 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T04:03:59,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestHBaseWalOnEC 2024-11-11T04:03:59,731 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T04:03:59,731 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:03:59,731 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] 
master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestHBaseWalOnEC" procId is: 9 2024-11-11T04:03:59,733 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T04:03:59,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T04:03:59,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741839_1015 (size=392) 2024-11-11T04:03:59,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741839_1015 (size=392) 2024-11-11T04:03:59,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741839_1015 (size=392) 2024-11-11T04:03:59,755 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 56ec98ac7de92a9be0beee18a7d2fad3, NAME => 'TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestHBaseWalOnEC', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3 2024-11-11T04:03:59,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741840_1016 (size=51) 2024-11-11T04:03:59,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741840_1016 (size=51) 2024-11-11T04:03:59,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741840_1016 (size=51) 2024-11-11T04:03:59,771 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(894): Instantiated TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:03:59,771 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1681): Closing 56ec98ac7de92a9be0beee18a7d2fad3, disabling compactions & flushes 2024-11-11T04:03:59,771 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1703): Closing region TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 2024-11-11T04:03:59,771 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 2024-11-11T04:03:59,771 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 
after waiting 0 ms 2024-11-11T04:03:59,771 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 2024-11-11T04:03:59,771 INFO [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1922): Closed TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 2024-11-11T04:03:59,771 DEBUG [RegionOpenAndInit-TestHBaseWalOnEC-pool-0 {}] regionserver.HRegion(1635): Region close journal for 56ec98ac7de92a9be0beee18a7d2fad3: 2024-11-11T04:03:59,773 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T04:03:59,774 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3.","families":{"info":[{"qualifier":"regioninfo","vlen":50,"tag":[],"timestamp":"1731297839773"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731297839773"}]},"ts":"1731297839773"} 2024-11-11T04:03:59,776 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-11T04:03:59,778 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T04:03:59,778 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731297839778"}]},"ts":"1731297839778"} 2024-11-11T04:03:59,781 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestHBaseWalOnEC, state=ENABLING in hbase:meta 2024-11-11T04:03:59,799 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {388b5ced38b8=0} racks are {/default-rack=0} 2024-11-11T04:03:59,800 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-11-11T04:03:59,800 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-11-11T04:03:59,800 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-11-11T04:03:59,800 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-11-11T04:03:59,800 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-11-11T04:03:59,800 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-11-11T04:03:59,800 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-11-11T04:03:59,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=56ec98ac7de92a9be0beee18a7d2fad3, ASSIGN}] 2024-11-11T04:03:59,802 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=56ec98ac7de92a9be0beee18a7d2fad3, ASSIGN 2024-11-11T04:03:59,804 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure 
table=TestHBaseWalOnEC, region=56ec98ac7de92a9be0beee18a7d2fad3, ASSIGN; state=OFFLINE, location=388b5ced38b8,32979,1731297837975; forceNewPlan=false, retain=false 2024-11-11T04:03:59,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T04:03:59,954 INFO [388b5ced38b8:38915 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-11T04:03:59,954 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=56ec98ac7de92a9be0beee18a7d2fad3, regionState=OPENING, regionLocation=388b5ced38b8,32979,1731297837975 2024-11-11T04:03:59,957 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 56ec98ac7de92a9be0beee18a7d2fad3, server=388b5ced38b8,32979,1731297837975}] 2024-11-11T04:04:00,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T04:04:00,111 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 388b5ced38b8,32979,1731297837975 2024-11-11T04:04:00,115 INFO [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 2024-11-11T04:04:00,115 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 56ec98ac7de92a9be0beee18a7d2fad3, NAME => 'TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3.', STARTKEY => '', ENDKEY => ''} 2024-11-11T04:04:00,116 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestHBaseWalOnEC 56ec98ac7de92a9be0beee18a7d2fad3 2024-11-11T04:04:00,116 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T04:04:00,116 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 56ec98ac7de92a9be0beee18a7d2fad3 2024-11-11T04:04:00,116 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 56ec98ac7de92a9be0beee18a7d2fad3 2024-11-11T04:04:00,118 INFO [StoreOpener-56ec98ac7de92a9be0beee18a7d2fad3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 56ec98ac7de92a9be0beee18a7d2fad3 2024-11-11T04:04:00,120 INFO [StoreOpener-56ec98ac7de92a9be0beee18a7d2fad3-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; 
tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 56ec98ac7de92a9be0beee18a7d2fad3 columnFamilyName cf 2024-11-11T04:04:00,120 DEBUG [StoreOpener-56ec98ac7de92a9be0beee18a7d2fad3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T04:04:00,121 INFO [StoreOpener-56ec98ac7de92a9be0beee18a7d2fad3-1 {}] regionserver.HStore(327): Store=56ec98ac7de92a9be0beee18a7d2fad3/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T04:04:00,122 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/default/TestHBaseWalOnEC/56ec98ac7de92a9be0beee18a7d2fad3 2024-11-11T04:04:00,123 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/default/TestHBaseWalOnEC/56ec98ac7de92a9be0beee18a7d2fad3 2024-11-11T04:04:00,126 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 56ec98ac7de92a9be0beee18a7d2fad3 2024-11-11T04:04:00,129 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/default/TestHBaseWalOnEC/56ec98ac7de92a9be0beee18a7d2fad3/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T04:04:00,130 INFO [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 56ec98ac7de92a9be0beee18a7d2fad3; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68828033, jitterRate=0.02561761438846588}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T04:04:00,131 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 56ec98ac7de92a9be0beee18a7d2fad3: 2024-11-11T04:04:00,133 INFO [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3., pid=11, masterSystemTime=1731297840111 2024-11-11T04:04:00,135 DEBUG [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 2024-11-11T04:04:00,136 INFO [RS_OPEN_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 
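The CompactionConfiguration entry above prints the effective compaction tuning for the cf store (minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560, and so on). As a hedged illustration, these values usually correspond to the configuration keys read back below; the key names are the commonly documented ones and are not something this log itself confirms:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Defaults mirror the values printed in the CompactionConfiguration log entry above.
    System.out.println("min files = " + conf.getInt("hbase.hstore.compaction.min", 3));
    System.out.println("max files = " + conf.getInt("hbase.hstore.compaction.max", 10));
    System.out.println("ratio     = " + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
    System.out.println("off-peak  = " + conf.getFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f));
    System.out.println("min size  = " + conf.getLong("hbase.hstore.compaction.min.size", 134217728L));
    System.out.println("throttle  = " + conf.getLong("hbase.regionserver.thread.compaction.throttle", 2684354560L));
  }
}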
2024-11-11T04:04:00,137 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=56ec98ac7de92a9be0beee18a7d2fad3, regionState=OPEN, openSeqNum=2, regionLocation=388b5ced38b8,32979,1731297837975 2024-11-11T04:04:00,142 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-11T04:04:00,144 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 56ec98ac7de92a9be0beee18a7d2fad3, server=388b5ced38b8,32979,1731297837975 in 182 msec 2024-11-11T04:04:00,146 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-11T04:04:00,146 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestHBaseWalOnEC, region=56ec98ac7de92a9be0beee18a7d2fad3, ASSIGN in 342 msec 2024-11-11T04:04:00,147 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T04:04:00,148 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestHBaseWalOnEC","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731297840147"}]},"ts":"1731297840147"} 2024-11-11T04:04:00,150 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestHBaseWalOnEC, state=ENABLED in hbase:meta 2024-11-11T04:04:00,193 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestHBaseWalOnEC execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T04:04:00,197 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestHBaseWalOnEC in 466 msec 2024-11-11T04:04:00,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T04:04:00,341 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestHBaseWalOnEC, procId: 9 completed 2024-11-11T04:04:00,341 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table TestHBaseWalOnEC get assigned. Timeout = 60000ms 2024-11-11T04:04:00,341 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T04:04:00,345 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table TestHBaseWalOnEC assigned to meta. Checking AM states. 2024-11-11T04:04:00,346 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-11T04:04:00,346 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table TestHBaseWalOnEC assigned. 
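pid=9 above is a CreateTableProcedure driven by an Admin request. A hedged client-side sketch of the same create for TestHBaseWalOnEC with its single cf family, assuming an already open Connection (the helper class and method names are made up):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  // Assumes 'connection' is an open Connection to the cluster above.
  static void createTestTable(Connection connection) throws java.io.IOException {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    // One 'cf' family, one version, region replication 1 - matching the descriptor
    // echoed by the master when the create request was received.
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        .setRegionReplication(1)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)
            .build())
        .build();
    try (Admin admin = connection.getAdmin()) {
      admin.createTable(desc); // returns once the CreateTableProcedure has completed
    }
  }
}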
2024-11-11T04:04:00,349 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T04:04:00,351 INFO [RS-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48420, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T04:04:00,358 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestHBaseWalOnEC 2024-11-11T04:04:00,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC 2024-11-11T04:04:00,361 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_PREPARE 2024-11-11T04:04:00,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T04:04:00,363 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T04:04:00,363 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T04:04:00,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T04:04:00,515 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 388b5ced38b8,32979,1731297837975 2024-11-11T04:04:00,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=32979 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-11T04:04:00,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 
2024-11-11T04:04:00,517 INFO [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 56ec98ac7de92a9be0beee18a7d2fad3 1/1 column families, dataSize=32 B heapSize=360 B 2024-11-11T04:04:00,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/default/TestHBaseWalOnEC/56ec98ac7de92a9be0beee18a7d2fad3/.tmp/cf/b0a02fd2d68e46ba8456921d704b481b is 36, key is row/cf:cq/1731297840351/Put/seqid=0 2024-11-11T04:04:00,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741841_1017 (size=4787) 2024-11-11T04:04:00,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741841_1017 (size=4787) 2024-11-11T04:04:00,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741841_1017 (size=4787) 2024-11-11T04:04:00,540 INFO [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=32 B at sequenceid=5 (bloomFilter=false), to=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/default/TestHBaseWalOnEC/56ec98ac7de92a9be0beee18a7d2fad3/.tmp/cf/b0a02fd2d68e46ba8456921d704b481b 2024-11-11T04:04:00,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/default/TestHBaseWalOnEC/56ec98ac7de92a9be0beee18a7d2fad3/.tmp/cf/b0a02fd2d68e46ba8456921d704b481b as hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/default/TestHBaseWalOnEC/56ec98ac7de92a9be0beee18a7d2fad3/cf/b0a02fd2d68e46ba8456921d704b481b 2024-11-11T04:04:00,557 INFO [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/default/TestHBaseWalOnEC/56ec98ac7de92a9be0beee18a7d2fad3/cf/b0a02fd2d68e46ba8456921d704b481b, entries=1, sequenceid=5, filesize=4.7 K 2024-11-11T04:04:00,559 INFO [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~32 B/32, heapSize ~344 B/344, currentSize=0 B/0 for 56ec98ac7de92a9be0beee18a7d2fad3 in 42ms, sequenceid=5, compaction requested=false 2024-11-11T04:04:00,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 56ec98ac7de92a9be0beee18a7d2fad3: 2024-11-11T04:04:00,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 
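The flush above persists a single cell whose key the HFile writer logs as row/cf:cq. A minimal sketch of the client-side Put and the flush request that schedules FlushTableProcedure (pid=12), again assuming an open Connection:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutAndFlushSketch {
  // Assumes 'connection' is an open Connection to the cluster above.
  static void putAndFlush(Connection connection) throws java.io.IOException {
    TableName name = TableName.valueOf("TestHBaseWalOnEC");
    try (Table table = connection.getTable(name);
         Admin admin = connection.getAdmin()) {
      // Write the single cell that shows up in the log as "key is row/cf:cq".
      Put put = new Put(Bytes.toBytes("row"));
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("cq"), Bytes.toBytes("value"));
      table.put(put);
      // Ask the master to flush the table; this is what drives the
      // FlushTableProcedure / FlushRegionProcedure pair seen above.
      admin.flush(name);
    }
  }
}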
2024-11-11T04:04:00,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/388b5ced38b8:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-11T04:04:00,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-11T04:04:00,564 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-11T04:04:00,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 198 msec 2024-11-11T04:04:00,568 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestHBaseWalOnEC in 207 msec 2024-11-11T04:04:00,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38915 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T04:04:00,667 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestHBaseWalOnEC, procId: 12 completed 2024-11-11T04:04:00,675 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-11T04:04:00,676 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-11T04:04:00,676 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b5329c9 to 127.0.0.1:63140 2024-11-11T04:04:00,677 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:04:00,677 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T04:04:00,677 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1983428063, stopped=false 2024-11-11T04:04:00,677 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=388b5ced38b8,38915,1731297837759 2024-11-11T04:04:00,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:04:00,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:04:00,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:04:00,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T04:04:00,723 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-11T04:04:00,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:04:00,723 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:04:00,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:04:00,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T04:04:00,724 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:04:00,724 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '388b5ced38b8,38541,1731297837911' ***** 2024-11-11T04:04:00,724 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-11T04:04:00,724 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:04:00,724 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '388b5ced38b8,42791,1731297837943' ***** 2024-11-11T04:04:00,724 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:04:00,724 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-11T04:04:00,724 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '388b5ced38b8,32979,1731297837975' ***** 2024-11-11T04:04:00,724 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-11T04:04:00,724 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:04:00,725 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:04:00,725 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T04:04:00,725 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-11T04:04:00,725 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:04:00,725 INFO [RS:2;388b5ced38b8:32979 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:04:00,725 INFO [RS:2;388b5ced38b8:32979 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:04:00,725 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(3579): Received CLOSE for 9cbdba57d6e499a98b815c1373b16dad 2024-11-11T04:04:00,725 INFO [RS:1;388b5ced38b8:42791 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 
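The shutdown fan-out above ("STOPPING region server ..." for all three servers) is driven by HBaseTestingUtility. A hedged sketch of the test lifecycle that brackets this log, from "Minicluster is up" to "Shutting down minicluster" (the try body stands in for the actual test steps, which are not shown here):

import org.apache.hadoop.hbase.HBaseTestingUtility;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    // Three region servers, matching the 38541/42791/32979 servers in the log.
    util.startMiniCluster(3);
    try {
      // ... exercise the cluster (create table, put, flush) ...
    } finally {
      // Triggers the "Shutting down minicluster" path: stop master and region servers.
      util.shutdownMiniCluster();
    }
  }
}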
2024-11-11T04:04:00,725 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T04:04:00,725 INFO [RS:1;388b5ced38b8:42791 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:04:00,726 INFO [RS:0;388b5ced38b8:38541 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T04:04:00,726 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer(1224): stopping server 388b5ced38b8,42791,1731297837943 2024-11-11T04:04:00,726 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(3579): Received CLOSE for 56ec98ac7de92a9be0beee18a7d2fad3 2024-11-11T04:04:00,726 INFO [RS:0;388b5ced38b8:38541 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-11T04:04:00,725 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-11T04:04:00,726 DEBUG [RS:1;388b5ced38b8:42791 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:04:00,726 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-11T04:04:00,726 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1224): stopping server 388b5ced38b8,32979,1731297837975 2024-11-11T04:04:00,726 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer(1250): stopping server 388b5ced38b8,42791,1731297837943; all regions closed. 2024-11-11T04:04:00,726 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1224): stopping server 388b5ced38b8,38541,1731297837911 2024-11-11T04:04:00,726 DEBUG [RS:2;388b5ced38b8:32979 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:04:00,726 DEBUG [RS:0;388b5ced38b8:38541 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:04:00,726 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 9cbdba57d6e499a98b815c1373b16dad, disabling compactions & flushes 2024-11-11T04:04:00,726 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T04:04:00,726 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-11T04:04:00,726 DEBUG [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1603): Online Regions={9cbdba57d6e499a98b815c1373b16dad=hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad., 56ec98ac7de92a9be0beee18a7d2fad3=TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3.} 2024-11-11T04:04:00,726 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T04:04:00,726 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 2024-11-11T04:04:00,726 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T04:04:00,726 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 
2024-11-11T04:04:00,726 DEBUG [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1629): Waiting on 56ec98ac7de92a9be0beee18a7d2fad3, 9cbdba57d6e499a98b815c1373b16dad 2024-11-11T04:04:00,726 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. after waiting 0 ms 2024-11-11T04:04:00,726 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-11T04:04:00,726 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 2024-11-11T04:04:00,727 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,42791,1731297837943 2024-11-11T04:04:00,727 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 9cbdba57d6e499a98b815c1373b16dad 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-11T04:04:00,727 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-11T04:04:00,727 DEBUG [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-11T04:04:00,727 DEBUG [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-11T04:04:00,727 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-11T04:04:00,727 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-11T04:04:00,727 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-11T04:04:00,727 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T04:04:00,728 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T04:04:00,728 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=2.51 KB heapSize=5.02 KB 2024-11-11T04:04:00,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741834_1010 (size=93) 2024-11-11T04:04:00,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741834_1010 (size=93) 2024-11-11T04:04:00,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741834_1010 (size=93) 2024-11-11T04:04:00,732 DEBUG [RS:1;388b5ced38b8:42791 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/oldWALs 2024-11-11T04:04:00,733 INFO [RS:1;388b5ced38b8:42791 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 
388b5ced38b8%2C42791%2C1731297837943:(num 1731297838564) 2024-11-11T04:04:00,733 DEBUG [RS:1;388b5ced38b8:42791 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T04:04:00,733 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T04:04:00,733 INFO [RS:1;388b5ced38b8:42791 {}] hbase.ChoreService(370): Chore service for: regionserver/388b5ced38b8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-11T04:04:00,733 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T04:04:00,733 INFO [regionserver/388b5ced38b8:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-11T04:04:00,733 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T04:04:00,733 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T04:04:00,734 INFO [RS:1;388b5ced38b8:42791 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:42791 2024-11-11T04:04:00,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T04:04:00,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/388b5ced38b8,42791,1731297837943 2024-11-11T04:04:00,744 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/namespace/9cbdba57d6e499a98b815c1373b16dad/.tmp/info/95ef9577fa9e4f9e99e9804e7c9bf328 is 45, key is default/info:d/1731297839582/Put/seqid=0 2024-11-11T04:04:00,744 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/.tmp/info/9c64cad6cb7740f6950ee877e48c50aa is 153, key is TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3./info:regioninfo/1731297840136/Put/seqid=0 2024-11-11T04:04:00,748 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [388b5ced38b8,42791,1731297837943] 2024-11-11T04:04:00,748 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 388b5ced38b8,42791,1731297837943; numProcessing=1 2024-11-11T04:04:00,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741843_1019 (size=7835) 2024-11-11T04:04:00,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741842_1018 (size=5037) 2024-11-11T04:04:00,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741842_1018 (size=5037) 2024-11-11T04:04:00,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45571 is added to blk_1073741843_1019 (size=7835) 2024-11-11T04:04:00,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741842_1018 (size=5037) 2024-11-11T04:04:00,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741843_1019 (size=7835) 2024-11-11T04:04:00,753 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.32 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/.tmp/info/9c64cad6cb7740f6950ee877e48c50aa 2024-11-11T04:04:00,755 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/namespace/9cbdba57d6e499a98b815c1373b16dad/.tmp/info/95ef9577fa9e4f9e99e9804e7c9bf328 2024-11-11T04:04:00,757 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/388b5ced38b8,42791,1731297837943 already deleted, retry=false 2024-11-11T04:04:00,757 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 388b5ced38b8,42791,1731297837943 expired; onlineServers=2 2024-11-11T04:04:00,762 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/namespace/9cbdba57d6e499a98b815c1373b16dad/.tmp/info/95ef9577fa9e4f9e99e9804e7c9bf328 as hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/namespace/9cbdba57d6e499a98b815c1373b16dad/info/95ef9577fa9e4f9e99e9804e7c9bf328 2024-11-11T04:04:00,770 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/namespace/9cbdba57d6e499a98b815c1373b16dad/info/95ef9577fa9e4f9e99e9804e7c9bf328, entries=2, sequenceid=6, filesize=4.9 K 2024-11-11T04:04:00,771 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 9cbdba57d6e499a98b815c1373b16dad in 45ms, sequenceid=6, compaction requested=false 2024-11-11T04:04:00,775 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/namespace/9cbdba57d6e499a98b815c1373b16dad/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-11T04:04:00,776 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 
2024-11-11T04:04:00,776 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 9cbdba57d6e499a98b815c1373b16dad: 2024-11-11T04:04:00,776 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1731297839058.9cbdba57d6e499a98b815c1373b16dad. 2024-11-11T04:04:00,777 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 56ec98ac7de92a9be0beee18a7d2fad3, disabling compactions & flushes 2024-11-11T04:04:00,777 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 2024-11-11T04:04:00,777 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 2024-11-11T04:04:00,777 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. after waiting 0 ms 2024-11-11T04:04:00,777 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 2024-11-11T04:04:00,779 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/.tmp/table/e0250b2b591c4d34b12871ac43f00fc6 is 52, key is TestHBaseWalOnEC/table:state/1731297840147/Put/seqid=0 2024-11-11T04:04:00,781 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/default/TestHBaseWalOnEC/56ec98ac7de92a9be0beee18a7d2fad3/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-11T04:04:00,782 INFO [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 2024-11-11T04:04:00,782 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 56ec98ac7de92a9be0beee18a7d2fad3: 2024-11-11T04:04:00,783 DEBUG [RS_CLOSE_REGION-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestHBaseWalOnEC,,1731297839727.56ec98ac7de92a9be0beee18a7d2fad3. 
2024-11-11T04:04:00,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741844_1020 (size=5347)
2024-11-11T04:04:00,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741844_1020 (size=5347)
2024-11-11T04:04:00,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741844_1020 (size=5347)
2024-11-11T04:04:00,786 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/.tmp/table/e0250b2b591c4d34b12871ac43f00fc6
2024-11-11T04:04:00,795 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/.tmp/info/9c64cad6cb7740f6950ee877e48c50aa as hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/info/9c64cad6cb7740f6950ee877e48c50aa
2024-11-11T04:04:00,803 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/info/9c64cad6cb7740f6950ee877e48c50aa, entries=20, sequenceid=14, filesize=7.7 K
2024-11-11T04:04:00,804 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/.tmp/table/e0250b2b591c4d34b12871ac43f00fc6 as hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/table/e0250b2b591c4d34b12871ac43f00fc6
2024-11-11T04:04:00,812 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/table/e0250b2b591c4d34b12871ac43f00fc6, entries=4, sequenceid=14, filesize=5.2 K
2024-11-11T04:04:00,813 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~2.51 KB/2567, heapSize ~4.74 KB/4856, currentSize=0 B/0 for 1588230740 in 85ms, sequenceid=14, compaction requested=false
2024-11-11T04:04:00,818 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/data/hbase/meta/1588230740/recovered.edits/17.seqid, newMaxSeqId=17, maxSeqId=1
2024-11-11T04:04:00,819 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-11T04:04:00,819 INFO [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740
2024-11-11T04:04:00,819 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740:
2024-11-11T04:04:00,819 DEBUG [RS_CLOSE_META-regionserver/388b5ced38b8:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-11T04:04:00,820 INFO [regionserver/388b5ced38b8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-11T04:04:00,820 INFO [regionserver/388b5ced38b8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-11T04:04:00,824 INFO [regionserver/388b5ced38b8:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases
2024-11-11T04:04:00,849 INFO [RS:1;388b5ced38b8:42791 {}] regionserver.HRegionServer(1307): Exiting; stopping=388b5ced38b8,42791,1731297837943; zookeeper connection closed.
2024-11-11T04:04:00,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:04:00,849 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42791-0x101295a251d0002, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:04:00,849 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@27c764a3 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@27c764a3
2024-11-11T04:04:00,927 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1250): stopping server 388b5ced38b8,32979,1731297837975; all regions closed.
2024-11-11T04:04:00,927 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1250): stopping server 388b5ced38b8,38541,1731297837911; all regions closed.
2024-11-11T04:04:00,927 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,32979,1731297837975
2024-11-11T04:04:00,927 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,38541,1731297837911
2024-11-11T04:04:00,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741836_1012 (size=4015)
2024-11-11T04:04:00,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741833_1009 (size=2619)
2024-11-11T04:04:00,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741836_1012 (size=4015)
2024-11-11T04:04:00,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741833_1009 (size=2619)
2024-11-11T04:04:00,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741833_1009 (size=2619)
2024-11-11T04:04:00,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741836_1012 (size=4015)
2024-11-11T04:04:00,934 DEBUG [RS:0;388b5ced38b8:38541 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/oldWALs
2024-11-11T04:04:00,934 INFO [RS:0;388b5ced38b8:38541 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 388b5ced38b8%2C38541%2C1731297837911.meta:.meta(num 1731297838977)
2024-11-11T04:04:00,934 DEBUG [RS:2;388b5ced38b8:32979 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/oldWALs
2024-11-11T04:04:00,934 INFO [RS:2;388b5ced38b8:32979 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 388b5ced38b8%2C32979%2C1731297837975:(num 1731297838556)
2024-11-11T04:04:00,935 DEBUG [RS:2;388b5ced38b8:32979 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T04:04:00,935 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.LeaseManager(133): Closed leases
2024-11-11T04:04:00,935 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/WALs/388b5ced38b8,38541,1731297837911
2024-11-11T04:04:00,936 INFO [RS:2;388b5ced38b8:32979 {}] hbase.ChoreService(370): Chore service for: regionserver/388b5ced38b8:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown
2024-11-11T04:04:00,936 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-11T04:04:00,936 INFO [regionserver/388b5ced38b8:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-11T04:04:00,936 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-11T04:04:00,936 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-11T04:04:00,936 INFO [RS:2;388b5ced38b8:32979 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:32979
2024-11-11T04:04:00,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741835_1011 (size=93)
2024-11-11T04:04:00,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741835_1011 (size=93)
2024-11-11T04:04:00,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741835_1011 (size=93)
2024-11-11T04:04:00,940 DEBUG [RS:0;388b5ced38b8:38541 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/oldWALs
2024-11-11T04:04:00,940 INFO [RS:0;388b5ced38b8:38541 {}] wal.AbstractFSWAL(1074): Closed WAL: FSHLog 388b5ced38b8%2C38541%2C1731297837911:(num 1731297838566)
2024-11-11T04:04:00,940 DEBUG [RS:0;388b5ced38b8:38541 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T04:04:00,941 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.LeaseManager(133): Closed leases
2024-11-11T04:04:00,941 INFO [RS:0;388b5ced38b8:38541 {}] hbase.ChoreService(370): Chore service for: regionserver/388b5ced38b8:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown
2024-11-11T04:04:00,941 INFO [regionserver/388b5ced38b8:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-11T04:04:00,941 INFO [RS:0;388b5ced38b8:38541 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38541
2024-11-11T04:04:00,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/388b5ced38b8,32979,1731297837975
2024-11-11T04:04:00,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-11T04:04:00,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/388b5ced38b8,38541,1731297837911
2024-11-11T04:04:00,965 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [388b5ced38b8,38541,1731297837911]
2024-11-11T04:04:00,965 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 388b5ced38b8,38541,1731297837911; numProcessing=2
2024-11-11T04:04:00,982 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/388b5ced38b8,38541,1731297837911 already deleted, retry=false
2024-11-11T04:04:00,982 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 388b5ced38b8,38541,1731297837911 expired; onlineServers=1
2024-11-11T04:04:00,982 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [388b5ced38b8,32979,1731297837975]
2024-11-11T04:04:00,982 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 388b5ced38b8,32979,1731297837975; numProcessing=3
2024-11-11T04:04:01,012 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/388b5ced38b8,32979,1731297837975 already deleted, retry=false
2024-11-11T04:04:01,012 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 388b5ced38b8,32979,1731297837975 expired; onlineServers=0
2024-11-11T04:04:01,012 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '388b5ced38b8,38915,1731297837759' *****
2024-11-11T04:04:01,012 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-11T04:04:01,012 DEBUG [M:0;388b5ced38b8:38915 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b5d373c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=388b5ced38b8/172.17.0.2:0
2024-11-11T04:04:01,012 INFO [M:0;388b5ced38b8:38915 {}] regionserver.HRegionServer(1224): stopping server 388b5ced38b8,38915,1731297837759
2024-11-11T04:04:01,012 INFO [M:0;388b5ced38b8:38915 {}] regionserver.HRegionServer(1250): stopping server 388b5ced38b8,38915,1731297837759; all regions closed.
2024-11-11T04:04:01,012 DEBUG [M:0;388b5ced38b8:38915 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T04:04:01,013 DEBUG [M:0;388b5ced38b8:38915 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-11T04:04:01,013 DEBUG [M:0;388b5ced38b8:38915 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-11T04:04:01,013 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-11T04:04:01,013 DEBUG [master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.small.0-1731297838290 {}] cleaner.HFileCleaner(306): Exit Thread[master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.small.0-1731297838290,5,FailOnTimeoutGroup]
2024-11-11T04:04:01,013 DEBUG [master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.large.0-1731297838290 {}] cleaner.HFileCleaner(306): Exit Thread[master/388b5ced38b8:0:becomeActiveMaster-HFileCleaner.large.0-1731297838290,5,FailOnTimeoutGroup]
2024-11-11T04:04:01,013 INFO [M:0;388b5ced38b8:38915 {}] hbase.ChoreService(370): Chore service for: master/388b5ced38b8:0 had [] on shutdown
2024-11-11T04:04:01,013 DEBUG [M:0;388b5ced38b8:38915 {}] master.HMaster(1733): Stopping service threads
2024-11-11T04:04:01,013 INFO [M:0;388b5ced38b8:38915 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-11T04:04:01,013 INFO [M:0;388b5ced38b8:38915 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-11T04:04:01,014 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-11T04:04:01,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-11T04:04:01,023 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-11T04:04:01,023 DEBUG [M:0;388b5ced38b8:38915 {}] zookeeper.ZKUtil(347): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-11T04:04:01,024 WARN [M:0;388b5ced38b8:38915 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-11T04:04:01,024 INFO [M:0;388b5ced38b8:38915 {}] assignment.AssignmentManager(391): Stopping assignment manager
2024-11-11T04:04:01,024 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master
2024-11-11T04:04:01,024 INFO [M:0;388b5ced38b8:38915 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-11T04:04:01,024 DEBUG [M:0;388b5ced38b8:38915 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-11T04:04:01,024 INFO [M:0;388b5ced38b8:38915 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T04:04:01,024 DEBUG [M:0;388b5ced38b8:38915 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T04:04:01,024 DEBUG [M:0;388b5ced38b8:38915 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-11T04:04:01,024 DEBUG [M:0;388b5ced38b8:38915 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T04:04:01,025 INFO [M:0;388b5ced38b8:38915 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.82 KB heapSize=54.28 KB
2024-11-11T04:04:01,044 DEBUG [M:0;388b5ced38b8:38915 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ceaf2d168b3f4a6eaf28e331c02c3632 is 82, key is hbase:meta,,1/info:regioninfo/1731297839014/Put/seqid=0
2024-11-11T04:04:01,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741845_1021 (size=5672)
2024-11-11T04:04:01,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741845_1021 (size=5672)
2024-11-11T04:04:01,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741845_1021 (size=5672)
2024-11-11T04:04:01,051 INFO [M:0;388b5ced38b8:38915 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ceaf2d168b3f4a6eaf28e331c02c3632
2024-11-11T04:04:01,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:04:01,065 INFO [RS:2;388b5ced38b8:32979 {}] regionserver.HRegionServer(1307): Exiting; stopping=388b5ced38b8,32979,1731297837975; zookeeper connection closed.
2024-11-11T04:04:01,065 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:32979-0x101295a251d0003, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:04:01,065 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1575dd78 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1575dd78
2024-11-11T04:04:01,072 DEBUG [M:0;388b5ced38b8:38915 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a53a0d14feb5421fbbdd3d526e5be8ff is 749, key is \x00\x00\x00\x00\x00\x00\x00\x09/proc:d/1731297840195/Put/seqid=0
2024-11-11T04:04:01,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:04:01,074 INFO [RS:0;388b5ced38b8:38541 {}] regionserver.HRegionServer(1307): Exiting; stopping=388b5ced38b8,38541,1731297837911; zookeeper connection closed.
2024-11-11T04:04:01,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38541-0x101295a251d0001, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:04:01,074 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@f6306a9 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@f6306a9
2024-11-11T04:04:01,074 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete
2024-11-11T04:04:01,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741846_1022 (size=7800)
2024-11-11T04:04:01,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741846_1022 (size=7800)
2024-11-11T04:04:01,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741846_1022 (size=7800)
2024-11-11T04:04:01,079 INFO [M:0;388b5ced38b8:38915 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.14 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a53a0d14feb5421fbbdd3d526e5be8ff
2024-11-11T04:04:01,100 DEBUG [M:0;388b5ced38b8:38915 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6ae03da9fa34430e929b465c0c6683e3 is 69, key is 388b5ced38b8,32979,1731297837975/rs:state/1731297838354/Put/seqid=0
2024-11-11T04:04:01,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741847_1023 (size=5294)
2024-11-11T04:04:01,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741847_1023 (size=5294)
2024-11-11T04:04:01,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741847_1023 (size=5294)
2024-11-11T04:04:01,107 INFO [M:0;388b5ced38b8:38915 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=195 B at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6ae03da9fa34430e929b465c0c6683e3
2024-11-11T04:04:01,113 DEBUG [M:0;388b5ced38b8:38915 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ceaf2d168b3f4a6eaf28e331c02c3632 as hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ceaf2d168b3f4a6eaf28e331c02c3632
2024-11-11T04:04:01,119 INFO [M:0;388b5ced38b8:38915 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ceaf2d168b3f4a6eaf28e331c02c3632, entries=8, sequenceid=111, filesize=5.5 K
2024-11-11T04:04:01,120 DEBUG [M:0;388b5ced38b8:38915 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/a53a0d14feb5421fbbdd3d526e5be8ff as hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a53a0d14feb5421fbbdd3d526e5be8ff
2024-11-11T04:04:01,126 INFO [M:0;388b5ced38b8:38915 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/a53a0d14feb5421fbbdd3d526e5be8ff, entries=13, sequenceid=111, filesize=7.6 K
2024-11-11T04:04:01,127 DEBUG [M:0;388b5ced38b8:38915 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/6ae03da9fa34430e929b465c0c6683e3 as hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6ae03da9fa34430e929b465c0c6683e3
2024-11-11T04:04:01,134 INFO [M:0;388b5ced38b8:38915 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:34003/user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/6ae03da9fa34430e929b465c0c6683e3, entries=3, sequenceid=111, filesize=5.2 K
2024-11-11T04:04:01,135 INFO [M:0;388b5ced38b8:38915 {}] regionserver.HRegion(3040): Finished flush of dataSize ~43.82 KB/44872, heapSize ~53.98 KB/55280, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 111ms, sequenceid=111, compaction requested=false
2024-11-11T04:04:01,137 INFO [M:0;388b5ced38b8:38915 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-11T04:04:01,137 DEBUG [M:0;388b5ced38b8:38915 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
2024-11-11T04:04:01,137 DEBUG [WAL-Shutdown-0 {}] wal.FSHLog(499): Closing WAL writer in /user/jenkins/test-data/102c7696-9ae4-2391-43a7-fddffb51b7a3/MasterData/WALs/388b5ced38b8,38915,1731297837759
2024-11-11T04:04:01,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37665 is added to blk_1073741830_1006 (size=52587)
2024-11-11T04:04:01,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44705 is added to blk_1073741830_1006 (size=52587)
2024-11-11T04:04:01,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45571 is added to blk_1073741830_1006 (size=52587)
2024-11-11T04:04:01,140 INFO [M:0;388b5ced38b8:38915 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-11-11T04:04:01,140 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-11-11T04:04:01,140 INFO [M:0;388b5ced38b8:38915 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:38915
2024-11-11T04:04:01,148 DEBUG [M:0;388b5ced38b8:38915 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/388b5ced38b8,38915,1731297837759 already deleted, retry=false
2024-11-11T04:04:01,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:04:01,257 INFO [M:0;388b5ced38b8:38915 {}] regionserver.HRegionServer(1307): Exiting; stopping=388b5ced38b8,38915,1731297837759; zookeeper connection closed.
2024-11-11T04:04:01,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38915-0x101295a251d0000, quorum=127.0.0.1:63140, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-11T04:04:01,259 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4d486b54{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T04:04:01,259 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7e2e33b1{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:04:01,259 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:04:01,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1146e519{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:04:01,260 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37ab8214{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/hadoop.log.dir/,STOPPED}
2024-11-11T04:04:01,261 WARN [BP-2067460977-172.17.0.2-1731297835580 heartbeating to localhost/127.0.0.1:34003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T04:04:01,261 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T04:04:01,261 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T04:04:01,261 WARN [BP-2067460977-172.17.0.2-1731297835580 heartbeating to localhost/127.0.0.1:34003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2067460977-172.17.0.2-1731297835580 (Datanode Uuid c961dbae-8b51-4b94-8975-f4ab35153047) service to localhost/127.0.0.1:34003
2024-11-11T04:04:01,261 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data5/current/BP-2067460977-172.17.0.2-1731297835580 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:04:01,262 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data6/current/BP-2067460977-172.17.0.2-1731297835580 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:04:01,262 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T04:04:01,264 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@35ecfb16{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T04:04:01,264 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5ad4b412{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:04:01,264 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:04:01,264 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2bdb6514{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:04:01,265 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bb43762{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/hadoop.log.dir/,STOPPED}
2024-11-11T04:04:01,266 WARN [BP-2067460977-172.17.0.2-1731297835580 heartbeating to localhost/127.0.0.1:34003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T04:04:01,266 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T04:04:01,266 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T04:04:01,266 WARN [BP-2067460977-172.17.0.2-1731297835580 heartbeating to localhost/127.0.0.1:34003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2067460977-172.17.0.2-1731297835580 (Datanode Uuid 662e5429-60ae-4511-baf9-0321b1fbea7c) service to localhost/127.0.0.1:34003
2024-11-11T04:04:01,266 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data3/current/BP-2067460977-172.17.0.2-1731297835580 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:04:01,267 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data4/current/BP-2067460977-172.17.0.2-1731297835580 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:04:01,267 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T04:04:01,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@71fc4ad0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-11T04:04:01,269 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b3638bf{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:04:01,269 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:04:01,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59792bf6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:04:01,269 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@464ee984{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/hadoop.log.dir/,STOPPED}
2024-11-11T04:04:01,270 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-11T04:04:01,270 WARN [BP-2067460977-172.17.0.2-1731297835580 heartbeating to localhost/127.0.0.1:34003 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-11T04:04:01,271 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-11T04:04:01,271 WARN [BP-2067460977-172.17.0.2-1731297835580 heartbeating to localhost/127.0.0.1:34003 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2067460977-172.17.0.2-1731297835580 (Datanode Uuid f060a0a8-a37a-43b2-b990-b632b26e6aff) service to localhost/127.0.0.1:34003
2024-11-11T04:04:01,271 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data1/current/BP-2067460977-172.17.0.2-1731297835580 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:04:01,272 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/cluster_7ac73f71-febc-6352-7c43-f4c43b589bc2/dfs/data/data2/current/BP-2067460977-172.17.0.2-1731297835580 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-11T04:04:01,272 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-11T04:04:01,276 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b189467{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-11T04:04:01,277 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@41822124{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-11T04:04:01,277 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-11T04:04:01,277 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@953b9cf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-11T04:04:01,277 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@57f6d17f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/3ff599be-d410-9ab0-5002-e2f13cad50e0/hadoop.log.dir/,STOPPED}
2024-11-11T04:04:01,284 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-11-11T04:04:01,315 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
2024-11-11T04:04:01,321 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestHBaseWalOnEC#testReadWrite[1] Thread=120 (was 85) - Thread LEAK? -, OpenFileDescriptor=517 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=497 (was 497), ProcessCount=11 (was 11), AvailableMemoryMB=5014 (was 5168)